diff --git a/src/main/resources/db/changelog/6-hs-booking/630-booking-item/6200-hs-booking-item.sql b/src/main/resources/db/changelog/6-hs-booking/630-booking-item/6300-hs-booking-item.sql similarity index 100% rename from src/main/resources/db/changelog/6-hs-booking/630-booking-item/6200-hs-booking-item.sql rename to src/main/resources/db/changelog/6-hs-booking/630-booking-item/6300-hs-booking-item.sql diff --git a/src/main/resources/db/changelog/6-hs-booking/630-booking-item/6208-hs-booking-item-test-data.sql b/src/main/resources/db/changelog/6-hs-booking/630-booking-item/6308-hs-booking-item-test-data.sql similarity index 100% rename from src/main/resources/db/changelog/6-hs-booking/630-booking-item/6208-hs-booking-item-test-data.sql rename to src/main/resources/db/changelog/6-hs-booking/630-booking-item/6308-hs-booking-item-test-data.sql diff --git a/src/main/resources/db/changelog/7-hs-hosting/701-hosting-asset/7016-hs-hosting-asset-migration.sql b/src/main/resources/db/changelog/7-hs-hosting/701-hosting-asset/7016-hs-hosting-asset-migration.sql new file mode 100644 index 00000000..0cff5fbe --- /dev/null +++ b/src/main/resources/db/changelog/7-hs-hosting/701-hosting-asset/7016-hs-hosting-asset-migration.sql @@ -0,0 +1,96 @@ +--liquibase formatted sql + +-- TODO: These changesets are just for the external remote views to simulate the legacy tables. +-- Once we don't need the external remote views anymore, create revert changesets. 
+ +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-mapping:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- + +CREATE TABLE hs_hosting_asset_legacy_id +( + uuid uuid NOT NULL REFERENCES hs_hosting_asset(uuid), + legacy_id integer NOT NULL +); +--// + + +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-sequence:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- + +CREATE SEQUENCE IF NOT EXISTS hs_hosting_asset_legacy_id_seq + AS integer + START 1000000000 + OWNED BY hs_hosting_asset_legacy_id.legacy_id; +--// + + +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-default:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- + +ALTER TABLE hs_hosting_asset_legacy_id + ALTER COLUMN legacy_id + SET DEFAULT nextVal('hs_hosting_asset_legacy_id_seq'); +--// + + +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-insert:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- + +CALL defineContext('schema-migration'); +INSERT INTO hs_hosting_asset_legacy_id(uuid, legacy_id) + SELECT uuid, nextVal('hs_hosting_asset_legacy_id_seq') FROM hs_hosting_asset; +--// + + +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-insert-trigger:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- +create or replace function insertassetLegacyIdMapping() + returns trigger + language plpgsql + strict as $$ +begin + if TG_OP <> 'INSERT' then + raise exception 'invalid usage of trigger'; + end if; + + INSERT INTO 
hs_hosting_asset_legacy_id VALUES + (NEW.uuid, nextVal('hs_hosting_asset_legacy_id_seq')); + + return NEW; +end; $$; + +create trigger createassetLegacyIdMapping + after insert on hs_hosting_asset + for each row + execute procedure insertassetLegacyIdMapping(); +--// + + +-- ============================================================================ +--changeset hs-hosting-asset-MIGRATION-delete-trigger:1 endDelimiter:--// +-- ---------------------------------------------------------------------------- +create or replace function deleteassetLegacyIdMapping_tf() + returns trigger + language plpgsql + strict as $$ +begin + if TG_OP <> 'DELETE' then + raise exception 'invalid usage of trigger'; + end if; + + DELETE FROM hs_hosting_asset_legacy_id + WHERE uuid = OLD.uuid; + + return OLD; +end; $$; + +create trigger deleteassetLegacyIdMapping_tg + before delete on hs_hosting_asset + for each row + execute procedure deleteassetLegacyIdMapping_tf(); +--// diff --git a/src/main/resources/db/changelog/db.changelog-master.yaml b/src/main/resources/db/changelog/db.changelog-master.yaml index 69072cf1..428daf4c 100644 --- a/src/main/resources/db/changelog/db.changelog-master.yaml +++ b/src/main/resources/db/changelog/db.changelog-master.yaml @@ -150,15 +150,17 @@ databaseChangeLog: - include: file: db/changelog/6-hs-booking/620-booking-project/6208-hs-booking-project-test-data.sql - include: - file: db/changelog/6-hs-booking/630-booking-item/6200-hs-booking-item.sql + file: db/changelog/6-hs-booking/630-booking-item/6300-hs-booking-item.sql - include: file: db/changelog/6-hs-booking/630-booking-item/6203-hs-booking-item-rbac.sql - include: - file: db/changelog/6-hs-booking/630-booking-item/6208-hs-booking-item-test-data.sql + file: db/changelog/6-hs-booking/630-booking-item/6308-hs-booking-item-test-data.sql - include: file: db/changelog/7-hs-hosting/701-hosting-asset/7010-hs-hosting-asset.sql - include: file: 
db/changelog/7-hs-hosting/701-hosting-asset/7013-hs-hosting-asset-rbac.sql + - include: + file: db/changelog/7-hs-hosting/701-hosting-asset/7016-hs-hosting-asset-migration.sql - include: file: db/changelog/7-hs-hosting/701-hosting-asset/7018-hs-hosting-asset-test-data.sql - include: diff --git a/src/test/java/net/hostsharing/hsadminng/hs/migration/BaseOfficeDataImport.java b/src/test/java/net/hostsharing/hsadminng/hs/migration/BaseOfficeDataImport.java index 62427802..c8f107f1 100644 --- a/src/test/java/net/hostsharing/hsadminng/hs/migration/BaseOfficeDataImport.java +++ b/src/test/java/net/hostsharing/hsadminng/hs/migration/BaseOfficeDataImport.java @@ -17,7 +17,6 @@ import net.hostsharing.hsadminng.hs.office.relation.HsOfficeRelation; import net.hostsharing.hsadminng.hs.office.relation.HsOfficeRelationRealEntity; import net.hostsharing.hsadminng.hs.office.relation.HsOfficeRelationType; import net.hostsharing.hsadminng.hs.office.sepamandate.HsOfficeSepaMandateEntity; -import net.hostsharing.hsadminng.rbac.object.BaseEntity; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; import org.junit.jupiter.api.BeforeAll; @@ -615,7 +614,7 @@ public abstract class BaseOfficeDataImport extends CsvDataImport { jpaAttempt.transacted(() -> { context(rbacSuperuser); contacts.forEach(this::persist); - updateLegacyIds(contacts, "hs_office_contact_legacy_id", "contact_id"); + updateLegacyIds(contacts, "hs_office_contact_legacy_id", "contact_id"); }).assertSuccessful(); jpaAttempt.transacted(() -> { @@ -699,24 +698,6 @@ public abstract class BaseOfficeDataImport extends CsvDataImport { assumeThat(partners.size()).isLessThanOrEqualTo(MAX_NUMBER_OF_TEST_DATA_PARTNERS); } - private void updateLegacyIds( - Map entities, - final String legacyIdTable, - final String legacyIdColumn) { - em.flush(); - entities.forEach((id, entity) -> em.createNativeQuery(""" - UPDATE ${legacyIdTable} - SET ${legacyIdColumn} = :legacyId - WHERE uuid = :uuid - """ - 
.replace("${legacyIdTable}", legacyIdTable) - .replace("${legacyIdColumn}", legacyIdColumn)) - .setParameter("legacyId", id) - .setParameter("uuid", entity.getUuid()) - .executeUpdate() - ); - } - @Test @Order(9999) @ContinueOnFailure diff --git a/src/test/java/net/hostsharing/hsadminng/hs/migration/CsvDataImport.java b/src/test/java/net/hostsharing/hsadminng/hs/migration/CsvDataImport.java index 7230cfff..66cfc5e7 100644 --- a/src/test/java/net/hostsharing/hsadminng/hs/migration/CsvDataImport.java +++ b/src/test/java/net/hostsharing/hsadminng/hs/migration/CsvDataImport.java @@ -334,6 +334,24 @@ public class CsvDataImport extends ContextBasedTest { errors.clear(); assertThat(errorsToLog).isEmpty(); } + + protected void updateLegacyIds( + Map entities, + final String legacyIdTable, + final String legacyIdColumn) { + em.flush(); + entities.forEach((id, entity) -> em.createNativeQuery(""" + UPDATE ${legacyIdTable} + SET ${legacyIdColumn} = :legacyId + WHERE uuid = :uuid + """ + .replace("${legacyIdTable}", legacyIdTable) + .replace("${legacyIdColumn}", legacyIdColumn)) + .setParameter("legacyId", id) + .setParameter("uuid", entity.getUuid()) + .executeUpdate() + ); + } } class Columns { diff --git a/src/test/java/net/hostsharing/hsadminng/hs/migration/ImportHostingAssets.java b/src/test/java/net/hostsharing/hsadminng/hs/migration/ImportHostingAssets.java index d3ed3407..e8d510d9 100644 --- a/src/test/java/net/hostsharing/hsadminng/hs/migration/ImportHostingAssets.java +++ b/src/test/java/net/hostsharing/hsadminng/hs/migration/ImportHostingAssets.java @@ -47,12 +47,12 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.regex.Pattern; -import java.util.stream.Collectors; import static java.util.Arrays.stream; import static java.util.Map.entry; import static java.util.Map.ofEntries; import static java.util.Optional.ofNullable; +import static 
java.util.stream.Collectors.joining; import static java.util.stream.Collectors.toMap; import static java.util.stream.Collectors.toSet; import static net.hostsharing.hsadminng.hs.hosting.asset.HsHostingAssetType.CLOUD_SERVER; @@ -938,6 +938,132 @@ public class ImportHostingAssets extends BaseOfficeDataImport { @Test @Order(19930) + void verifyCloudServerLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(CLOUD_SERVER)).isEqualTo(""" + 23611 + """.trim()); + assertThat(missingHostingAsstLegacyIds(CLOUD_SERVER)).isEmpty(); + } + + @Test + @Order(19931) + void verifyManagedServerLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(MANAGED_SERVER)).isEqualTo(""" + 10968 + 10978 + 11061 + 11447 + """.trim()); + assertThat(missingHostingAsstLegacyIds(MANAGED_SERVER)).isEmpty(); + } + + @Test + @Order(19932) + void verifyManagedWebspaceLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(MANAGED_WEBSPACE)).isEqualTo(""" + 10630 + 11094 + 11111 + 11112 + 19959 + """.trim()); + assertThat(missingHostingAsstLegacyIds(MANAGED_WEBSPACE)).isEmpty(); + } + + @Test + @Order(19933) + void verifyUnixUserLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(UNIX_USER)).isEqualTo(""" + 5803 + 5805 + 5809 + 5811 + 5813 + 5835 + 5961 + 5964 + 5966 + 5990 + 6705 + 6824 + 7846 + 9546 + 9596 + """.trim()); + assertThat(missingHostingAsstLegacyIds(UNIX_USER)).isEmpty(); + } + + @Test + @Order(19934) + void verifyPgSqlDbLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(PGSQL_DATABASE)).isEqualTo(""" + 1077 + 1858 + 1860 + 4931 + 4932 + 7522 + 7523 + 7605 + """.trim()); + assertThat(missingHostingAsstLegacyIds(PGSQL_DATABASE)).isEmpty(); + } + + @Test + @Order(19934) + void verifyPgSqlUserLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + 
assertThat(fetchHosingAssetLegacyIds(PGSQL_USER)).isEqualTo(""" + 1857 + 1859 + 1860 + 1861 + 4931 + 7522 + 7605 + """.trim()); + assertThat(missingHostingAsstLegacyIds(PGSQL_USER)).isEmpty(); + } + + @Test + @Order(19935) + void verifyMariaDbLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(MARIADB_DATABASE)).isEqualTo(""" + 1786 + 1805 + 4908 + 4941 + 4942 + 7520 + 7521 + 7604 + """.trim()); + assertThat(missingHostingAsstLegacyIds(MARIADB_DATABASE)).isEmpty(); + } + + @Test + @Order(19936) + void verifyMariaDbUserLegacyIds() { + assumeThatWeAreImportingControlledTestData(); + assertThat(fetchHosingAssetLegacyIds(MARIADB_USER)).isEqualTo(""" + 1858 + 4908 + 4909 + 4932 + 7520 + 7604 + """.trim()); + assertThat(missingHostingAsstLegacyIds(MARIADB_USER)).isEmpty(); + } + + @Test + @Order(19940) void verifyProjectAgentsCanViewEmailAddresses() { assumeThatWeAreImportingControlledTestData(); @@ -949,6 +1075,7 @@ public class ImportHostingAssets extends BaseOfficeDataImport { assertThat(haCount).isEqualTo(68); } + // ============================================================================================ @Test @@ -1006,6 +1133,11 @@ public class ImportHostingAssets extends BaseOfficeDataImport { } ).assertSuccessful() ); + + jpaAttempt.transacted(() -> { + context(rbacSuperuser); + updateLegacyIds(assets, "hs_hosting_asset_legacy_id", "legacy_id"); + }).assertSuccessful(); } private void verifyActuallyPersistedHostingAssetCount( @@ -1610,7 +1742,7 @@ public class ImportHostingAssets extends BaseOfficeDataImport { //noinspection unchecked zoneData.put("user-RR", ((ArrayList>) zoneData.get("user-RR")).stream() - .map(userRR -> userRR.stream().map(Object::toString).collect(Collectors.joining(" "))) + .map(userRR -> userRR.stream().map(Object::toString).collect(joining(" "))) .toArray(String[]::new) ); domainDnsSetupAsset.getConfig().putAll(zoneData); @@ -1758,4 +1890,35 @@ public class ImportHostingAssets extends 
BaseOfficeDataImport { protected static void assumeThatWeAreImportingControlledTestData() { assumeThat(isImportingControlledTestData()).isTrue(); } + + private String fetchHosingAssetLegacyIds(final HsHostingAssetType type) { + //noinspection unchecked + return ((List>) em.createNativeQuery( + """ + SELECT li.* FROM hs_hosting_asset_legacy_id li + JOIN hs_hosting_asset ha ON ha.uuid=li.uuid + WHERE CAST(ha.type AS text)=:type + ORDER BY legacy_id + """, + List.class) + .setParameter("type", type.name()) + .getResultList() + ).stream().map(row -> row.get(1).toString()).collect(joining("\n")); + } + + private String missingHostingAsstLegacyIds(final HsHostingAssetType type) { + //noinspection unchecked + return ((List>) em.createNativeQuery( + """ + SELECT ha.uuid, ha.type, ha.identifier FROM hs_hosting_asset ha + JOIN hs_hosting_asset_legacy_id li ON li.uuid=ha.uuid + WHERE li.legacy_id is null AND CAST(ha.type AS text)=:type + ORDER BY li.legacy_id + """, + List.class) + .setParameter("type", type.name()) + .getResultList()).stream() + .map(row -> row.stream().map(Object::toString).collect(joining(", "))) + .collect(joining("\n")); + } }