RED-7384: migration fixes forward port
* Treat pending dicts in approved files for resize and remove in redaction-service
* Disable the automatic analysis job via the migration-status table and a Liquibase migration
* Migrate imported redaction files to the new structure
* Improve logging
This commit is contained in:
parent
06e0bba76c
commit
1fc74f2a0c
@ -6,7 +6,7 @@ plugins {
|
||||
jacoco
|
||||
}
|
||||
|
||||
val redactionServiceVersion by rootProject.extra { "4.262.0" }
|
||||
val redactionServiceVersion by rootProject.extra { "4.290.0" }
|
||||
val pdftronRedactionServiceVersion by rootProject.extra { "4.48.0" }
|
||||
val redactionReportServiceVersion by rootProject.extra { "4.47.0" }
|
||||
val searchServiceVersion by rootProject.extra { "2.71.0" }
|
||||
@ -49,7 +49,6 @@ tasks.named<Test>("test") {
|
||||
|
||||
tasks.test {
|
||||
finalizedBy(tasks.jacocoTestReport) // report is always generated after tests run
|
||||
|
||||
}
|
||||
|
||||
tasks.jacocoTestReport {
|
||||
|
||||
@ -40,10 +40,10 @@ public class CustomPermissionService {
|
||||
|
||||
public void syncAllCustomPermissions() {
|
||||
|
||||
log.info("Syncing all custom permissions");
|
||||
log.debug("Syncing all custom permissions");
|
||||
var targetObjects = getAllSupportedTargetObjects();
|
||||
targetObjects.forEach(this::applyCustomPermissions);
|
||||
log.info("All custom permissions synced");
|
||||
log.debug("All custom permissions synced");
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -1,24 +1,21 @@
|
||||
package com.iqser.red.service.persistence.management.v1.processor.migration;
|
||||
|
||||
import static com.iqser.red.service.persistence.management.v1.processor.configuration.MessagingConfiguration.MIGRATION_QUEUE;
|
||||
import static com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileType.ENTITY_LOG;
|
||||
import static com.knecon.fforesight.service.layoutparser.internal.api.queue.LayoutParsingQueueNames.LAYOUT_PARSING_REQUEST_QUEUE;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.springframework.amqp.rabbit.core.RabbitTemplate;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import com.iqser.red.service.persistence.management.v1.processor.entity.annotations.AnnotationEntityId;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.entity.dossier.FileEntity;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.entity.migration.SaasMigrationStatusEntity;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.exception.InternalServerErrorException;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.exception.NotFoundException;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.model.ManualChangesQueryOptions;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.CommentService;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.DossierService;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.IndexingService;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.job.AutomaticAnalysisJob;
|
||||
@ -69,7 +66,9 @@ public class SaasMigrationService implements TenantSyncService {
|
||||
SaasAnnotationIdMigrationService saasAnnotationIdMigrationService;
|
||||
UncompressedFilesMigrationService uncompressedFilesMigrationService;
|
||||
ManualRedactionService manualRedactionService;
|
||||
CommentService commentService;
|
||||
RankDeDuplicationService rankDeDuplicationService;
|
||||
SaasMigrationManualChangesUpdateService saasMigrationManualChangesUpdateService;
|
||||
|
||||
|
||||
@Override
|
||||
@ -91,48 +90,29 @@ public class SaasMigrationService implements TenantSyncService {
|
||||
log.info("Finished uncompressed files migration ...");
|
||||
|
||||
rankDeDuplicationService.deduplicate();
|
||||
|
||||
int numberOfFiles = 0;
|
||||
var dossiers = dossierService.getAllDossiers()
|
||||
.stream()
|
||||
.filter(dossier -> dossier.getHardDeletedTime() == null)
|
||||
.toList();
|
||||
|
||||
for (var dossier : dossiers) {
|
||||
var files = fileStatusPersistenceService.getStatusesForDossier(dossier.getId())
|
||||
.stream()
|
||||
.filter(file -> file.getHardDeletedTime() == null)
|
||||
.toList();
|
||||
var files = saasMigrationStatusPersistenceService.findAll();
|
||||
|
||||
var migrationStati = saasMigrationStatusPersistenceService.findAll()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(SaasMigrationStatusEntity::getFileId, SaasMigrationStatusEntity::getStatus));
|
||||
for (var file : files) {
|
||||
|
||||
for (var file : files) {
|
||||
if (notExistsOrError(file, migrationStati)) {
|
||||
// delete NER_ENTITIES since offsets depend on old document structure.
|
||||
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossier.getId(), file.getId(), FileType.NER_ENTITIES));
|
||||
saasMigrationStatusPersistenceService.createMigrationRequiredStatus(dossier.getId(), file.getId());
|
||||
var layoutParsingRequest = layoutParsingRequestFactory.build(dossier.getId(), file.getId(), false);
|
||||
rabbitTemplate.convertAndSend(LAYOUT_PARSING_REQUEST_QUEUE, layoutParsingRequest);
|
||||
numberOfFiles++;
|
||||
} else {
|
||||
log.info("Skipping file with id {} and dossier {}, since it is already migrated or is currently being migrated!", file.getId(), file.getDossierId());
|
||||
}
|
||||
if (!file.getStatus().equals(SaasMigrationStatus.MIGRATION_REQUIRED)) {
|
||||
log.info("Skipping {} for tenant {} since migration status is {}", file.getFileId(), TenantContext.getTenantId(), file.getStatus());
|
||||
continue;
|
||||
}
|
||||
|
||||
// delete NER_ENTITIES since offsets depend on old document structure.
|
||||
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(file.getDossierId(), file.getFileId(), FileType.NER_ENTITIES));
|
||||
|
||||
var layoutParsingRequest = layoutParsingRequestFactory.build(file.getDossierId(), file.getFileId(), false);
|
||||
|
||||
rabbitTemplate.convertAndSend(LAYOUT_PARSING_REQUEST_QUEUE, layoutParsingRequest);
|
||||
|
||||
numberOfFiles++;
|
||||
|
||||
}
|
||||
|
||||
log.info("Added {} documents for tenant {} to Layout-Parsing queue for saas migration", numberOfFiles, TenantContext.getTenantId());
|
||||
|
||||
if (numberOfFiles == 0) {
|
||||
finalizeMigration();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private boolean notExistsOrError(FileEntity file, Map<String, SaasMigrationStatus> migrationStati) {
|
||||
|
||||
return migrationStati.getOrDefault(file.getId(), SaasMigrationStatus.ERROR).equals(SaasMigrationStatus.ERROR);
|
||||
}
|
||||
|
||||
|
||||
@ -161,24 +141,27 @@ public class SaasMigrationService implements TenantSyncService {
|
||||
return;
|
||||
}
|
||||
|
||||
log.info("Layout Parsing finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
|
||||
saasMigrationStatusPersistenceService.updateStatus(fileId, SaasMigrationStatus.DOCUMENT_FILES_MIGRATED);
|
||||
|
||||
if (fileStatusPersistenceService.getStatus(fileId).getWorkflowStatus().equals(WorkflowStatus.APPROVED)) {
|
||||
manualRedactionProviderService.convertUnprocessedAddToDictionariesToLocalChanges(fileId);
|
||||
saasMigrationManualChangesUpdateService.convertUnprocessedAddToDictionariesToLocalChanges(fileId);
|
||||
}
|
||||
|
||||
try {
|
||||
indexingService.reindex(dossierId, Set.of(fileId), false);
|
||||
|
||||
String dossierTemplateId = dossierService.getDossierById(dossierId).getDossierTemplateId();
|
||||
|
||||
rabbitTemplate.convertAndSend(MIGRATION_QUEUE,
|
||||
MigrationRequest.builder()
|
||||
.dossierTemplateId(dossierTemplateId)
|
||||
.dossierId(dossierId)
|
||||
.fileId(fileId)
|
||||
.fileIsApproved(fileStatusPersistenceService.getStatus(fileId).getWorkflowStatus().equals(WorkflowStatus.APPROVED))
|
||||
.manualRedactions(manualRedactionProviderService.getManualRedactions(fileId, ManualChangesQueryOptions.allWithoutDeleted()))
|
||||
.entitiesWithComments(commentService.getCommentCounts(fileId).keySet())
|
||||
.build());
|
||||
log.info("Layout Parsing finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
|
||||
} catch (Exception e) {
|
||||
log.error("Queuing of entityLog migration failed with {}", e.getMessage());
|
||||
saasMigrationStatusPersistenceService.updateErrorStatus(fileId, String.format("Queuing of entityLog migration failed with %s", e.getMessage()));
|
||||
@ -211,8 +194,9 @@ public class SaasMigrationService implements TenantSyncService {
|
||||
|
||||
private boolean entityLogMigrationFilesExist(String dossierId, String fileId) {
|
||||
|
||||
return storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, ENTITY_LOG))
|
||||
&& storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.MIGRATED_IDS));
|
||||
return storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.ENTITY_LOG)) && storageService.objectExists(
|
||||
TenantContext.getTenantId(),
|
||||
StorageIdUtils.getStorageId(dossierId, fileId, FileType.MIGRATED_IDS));
|
||||
}
|
||||
|
||||
|
||||
@ -239,26 +223,39 @@ public class SaasMigrationService implements TenantSyncService {
|
||||
List<ManualRedactionEntry> manualRedactionEntriesToAdd = migratedIds.getManualRedactionEntriesToAdd();
|
||||
int count = addManualRedactionEntries(manualRedactionEntriesToAdd);
|
||||
log.info("Added {} additional manual entries.", count);
|
||||
deleteSectionGrid(dossierId, fileId);
|
||||
deleteSectionGridAndNerEntitiesFiles(dossierId, fileId);
|
||||
saasMigrationStatusPersistenceService.updateStatus(fileId, SaasMigrationStatus.FINISHED);
|
||||
|
||||
log.info("AnnotationIds migration finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
|
||||
finalizeMigration();
|
||||
// finalizeMigration(); // AutomaticAnalysisJob should be re-enabled by re-starting the persistence service pod after a rule change
|
||||
// This ensures no analysis will happen with outdated rules
|
||||
}
|
||||
|
||||
|
||||
private void deleteSectionGrid(String dossierId, String fileId) {
|
||||
private void deleteSectionGridAndNerEntitiesFiles(String dossierId, String fileId) {
|
||||
|
||||
try {
|
||||
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.SECTION_GRID));
|
||||
} catch (StorageObjectDoesNotExist e) {
|
||||
log.info("No sectiongrid found for {}, {}, ignoring....", dossierId, fileId);
|
||||
}
|
||||
|
||||
try {
|
||||
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.NER_ENTITIES));
|
||||
} catch (StorageObjectDoesNotExist e) {
|
||||
log.info("No ner entities file found for {}, {}, ignoring....", dossierId, fileId);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private int addManualRedactionEntries(List<ManualRedactionEntry> manualRedactionEntriesToAdd) {
|
||||
|
||||
manualRedactionEntriesToAdd.forEach(add -> {
|
||||
if (add.getSection() != null && add.getSection().length() > 254) {
|
||||
add.setSection(add.getSection().substring(0, 254));
|
||||
}
|
||||
});
|
||||
|
||||
return manualRedactionService.addManualRedactionEntries(manualRedactionEntriesToAdd, true);
|
||||
}
|
||||
|
||||
|
||||
@ -0,0 +1,84 @@
|
||||
package com.iqser.red.service.persistence.management.v1.processor.migration.migrations;

import java.util.Collection;
import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.iqser.red.service.persistence.management.v1.processor.entity.dossier.FileEntity;
import com.iqser.red.service.persistence.management.v1.processor.migration.Migration;
import com.iqser.red.service.persistence.management.v1.processor.service.FileManagementStorageService;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.FileStatusPersistenceService;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.Position;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.imported.ImportedRedaction;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.imported.ImportedRedactions;
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileType;

import lombok.Setter;
import lombok.extern.slf4j.Slf4j;

/**
 * Migration step 17: rewrites every stored IMPORTED_REDACTIONS object from the
 * legacy {@code redactionlog.imported} model into the new
 * {@code analysislog.entitylog.imported} model.
 *
 * <p>The legacy format keys redactions in a map of lists; the new format is a
 * single flat list. Positions are converted from rectangles (top-left point +
 * width/height) to {@link Position} instances.
 *
 * <p>NOTE(review): the old object is deleted before the converted one is
 * stored; both live under the same {@link FileType#IMPORTED_REDACTIONS}
 * storage key, so the delete-then-store order is required.
 */
@Slf4j
@Setter
@Service
public class MigrateImportedRedactionsFiles17 extends Migration {

	// Supplies the set of files to scan for legacy imported-redaction objects.
	@Autowired
	private FileStatusPersistenceService fileStatusPersistenceService;

	// Storage access used to read, delete and re-store the redaction objects.
	@Autowired
	private FileManagementStorageService fileManagementStorageService;

	private static final String NAME = "Migrating the ImportedRedaction files to the new structure";
	private static final long VERSION = 17;


	public MigrateImportedRedactionsFiles17() {

		super(NAME, VERSION);
	}


	/**
	 * Converts the IMPORTED_REDACTIONS object of every known file, skipping
	 * files that have no such object in storage.
	 */
	@Override
	protected void migrate() {

		for (FileEntity fileEntity : fileStatusPersistenceService.getAllFiles()) {

			if (!fileManagementStorageService.objectExists(fileEntity.getDossierId(), fileEntity.getId(), FileType.IMPORTED_REDACTIONS)) {
				continue;
			}

			com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedactions legacyRedactions =
					fileManagementStorageService.getImportedRedactions(fileEntity.getDossierId(), fileEntity.getId());

			// Remove the legacy object first; the converted object is stored
			// under the same FileType key afterwards.
			fileManagementStorageService.deleteObject(fileEntity.getDossierId(), fileEntity.getId(), FileType.IMPORTED_REDACTIONS);

			// Flatten the legacy map-of-lists into a single list in the new model.
			List<ImportedRedaction> converted = legacyRedactions.getImportedRedactions()
					.values()
					.stream()
					.flatMap(Collection::stream)
					.map(this::toImportedRedaction)
					.toList();

			fileManagementStorageService.storeJSONObject(fileEntity.getDossierId(), fileEntity.getId(), FileType.IMPORTED_REDACTIONS, new ImportedRedactions(converted));
		}
	}


	/**
	 * Maps one legacy redaction to the new model, keeping its id and
	 * converting its rectangle positions.
	 */
	private ImportedRedaction toImportedRedaction(com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedaction legacy) {

		return ImportedRedaction.builder()
				.id(legacy.getId())
				.positions(mapPositions(legacy))
				.build();
	}


	/**
	 * Converts legacy rectangles (top-left x/y, width, height, page) into
	 * {@link Position} instances.
	 */
	private static List<Position> mapPositions(com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedaction legacy) {

		return legacy.getPositions()
				.stream()
				.map(rect -> new Position(rect.getTopLeft().getX(), rect.getTopLeft().getY(), rect.getWidth(), rect.getHeight(), rect.getPage()))
				.toList();
	}

}
|
||||
@ -57,7 +57,7 @@ public class KeyCloakUserSyncService {
|
||||
// remove KC users, what's left is users that are in redaction but no longer in KC
|
||||
redactionObjectsUserIds.removeAll(allUserIds);
|
||||
|
||||
log.info("Performing user sync/cleanup for ids: {}", redactionObjectsUserIds);
|
||||
log.debug("Performing user sync/cleanup for ids: {}", redactionObjectsUserIds);
|
||||
|
||||
redactionObjectsUserIds.forEach(removedUser -> alLDossiers.forEach(dossier -> this.userService.updateDossierUsers(removedUser,
|
||||
UserService.UserRemovalModel.PERMANENT,
|
||||
|
||||
@ -132,7 +132,8 @@ public class ReanalysisRequiredStatusService {
|
||||
var fullAnalysisRequired = !rulesVersionMatches || !componentRulesVersionMatches || !legalBasisVersionMatches;
|
||||
if (reanalysisRequired || fullAnalysisRequired) {
|
||||
log.info(
|
||||
"For file: {} analysis is required because -> ruleVersionMatches: {}/{}, componentRuleVersionMatches {}/{}, dictionaryVersionMatches: {}/{}, legalBasisVersionMatches: {}/{}, dossierDictionaryVersionMatches: {}/{}",
|
||||
"For file: {}-{} analysis is required because -> ruleVersionMatches: {}/{}, componentRuleVersionMatches {}/{}, dictionaryVersionMatches: {}/{}, legalBasisVersionMatches: {}/{}, dossierDictionaryVersionMatches: {}/{}",
|
||||
fileStatus.getId(),
|
||||
fileStatus.getFilename(),
|
||||
fileStatus.getRulesVersion(),
|
||||
dossierTemplateVersions.getOrDefault(RULES, -1L),
|
||||
|
||||
@ -12,6 +12,7 @@ import org.springframework.stereotype.Service;
|
||||
|
||||
import com.iqser.red.service.persistence.management.v1.processor.configuration.MessagingConfiguration;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.FileStatusService;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.SaasMigrationStatusPersistenceService;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.settings.FileManagementServiceSettings;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.utils.TenantUtils;
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileModel;
|
||||
@ -34,6 +35,7 @@ public class AutomaticAnalysisJob implements Job {
|
||||
private final FileStatusService fileStatusService;
|
||||
private final TenantProvider tenantProvider;
|
||||
private final ObservationRegistry observationRegistry;
|
||||
private final SaasMigrationStatusPersistenceService saasMigrationStatusPersistenceService;
|
||||
|
||||
@Setter
|
||||
private boolean schedulingStopped;
|
||||
@ -52,13 +54,22 @@ public class AutomaticAnalysisJob implements Job {
|
||||
tenantProvider.getTenants()
|
||||
.forEach(tenant -> {
|
||||
|
||||
if (!TenantUtils.isTenantReadyForPersistence(tenant) || stoppedTenants.contains(tenant.getTenantId())) {
|
||||
if (!TenantUtils.isTenantReadyForPersistence(tenant)) {
|
||||
log.info("[Tenant:{}] Skipping scheduling since tenant is not ready.", tenant.getTenantId());
|
||||
return;
|
||||
}
|
||||
|
||||
if (stoppedTenants.contains(tenant.getTenantId())) {
|
||||
log.info("[Tenant:{}] Skipping scheduling as automatic reanalysis is disabled for tenant.", tenant.getTenantId());
|
||||
return;
|
||||
}
|
||||
|
||||
TenantContext.setTenantId(tenant.getTenantId());
|
||||
|
||||
var redactionQueueInfo = amqpAdmin.getQueueInfo(MessagingConfiguration.REDACTION_QUEUE);
|
||||
if (!saasMigrationStatusPersistenceService.migrationFinishedForTenant()) {
|
||||
log.info("[Tenant:{}] Skipping scheduling as there are files that require migration.", tenant.getTenantId());
|
||||
return;
|
||||
}var redactionQueueInfo = amqpAdmin.getQueueInfo(MessagingConfiguration.REDACTION_QUEUE);
|
||||
if (redactionQueueInfo != null) {
|
||||
log.debug("[Tenant:{}] Checking queue status to see if background analysis can happen. Currently {} holds {} elements and has {} consumers",
|
||||
tenant.getTenantId(),
|
||||
@ -107,10 +118,10 @@ public class AutomaticAnalysisJob implements Job {
|
||||
.observe(() -> {
|
||||
|
||||
if (file.isFullAnalysisRequired()) {
|
||||
log.info("[Tenant:{}] Queued file: {} for automatic full analysis! ", TenantContext.getTenantId(), file.getFilename());
|
||||
log.info("[Tenant:{}] Queued file: {} for automatic full analysis! ", TenantContext.getTenantId(), file.getId());
|
||||
fileStatusService.setStatusFullReprocess(file.getDossierId(), file.getId(), false, false);
|
||||
} else if (file.isReanalysisRequired()) {
|
||||
log.info("[Tenant:{}] Queued file: {} for automatic reanalysis! ", TenantContext.getTenantId(), file.getFilename());
|
||||
log.info("[Tenant:{}] Queued file: {} for automatic reanalysis! ", TenantContext.getTenantId(), file.getId());
|
||||
fileStatusService.setStatusReprocess(file.getDossierId(), file.getId(), false);
|
||||
}
|
||||
});
|
||||
@ -125,12 +136,14 @@ public class AutomaticAnalysisJob implements Job {
|
||||
|
||||
public void stopForTenant(String tenantId) {
|
||||
|
||||
log.info("Stopping automatic analysis for tenant {}", tenantId);
|
||||
stoppedTenants.add(tenantId);
|
||||
}
|
||||
|
||||
|
||||
public void startForTenant(String tenantId) {
|
||||
|
||||
log.info("Starting automatic analysis for tenant {}", tenantId);
|
||||
stoppedTenants.remove(tenantId);
|
||||
}
|
||||
|
||||
|
||||
@ -42,6 +42,11 @@ public class SaasMigrationStatusPersistenceService {
|
||||
return migrationStatusOptional.isPresent() && migrationStatusOptional.get().getStatus() != SaasMigrationStatus.FINISHED;
|
||||
}
|
||||
|
||||
public boolean migrationFinishedForTenant() {
|
||||
|
||||
return saasMigrationStatusRepository.findAllWhereStatusNotFinishedAndNotError() == 0;
|
||||
}
|
||||
|
||||
|
||||
@Transactional
|
||||
public void createMigrationRequiredStatus(String dossierId, String fileId) {
|
||||
|
||||
@ -37,4 +37,8 @@ public interface SaasMigrationStatusRepository extends JpaRepository<SaasMigrati
|
||||
@Query("select count(*) from SaasMigrationStatusEntity")
|
||||
int countAll();
|
||||
|
||||
|
||||
@Query("select count(*) from SaasMigrationStatusEntity e where e.status != 'FINISHED' and e.status != 'ERROR'")
|
||||
int findAllWhereStatusNotFinishedAndNotError();
|
||||
|
||||
}
|
||||
|
||||
@ -0,0 +1,10 @@
|
||||
# Liquibase changelog: seed the saas_migration_status table so every file that
# already exists is flagged as requiring migration. The automatic analysis job
# checks this table and stays disabled until all rows leave MIGRATION_REQUIRED.
databaseChangeLog:
  - changeSet:
      id: create-migration-required-status-for-each-present-file
      author: Kilian
      changes:
        - sql:
            sql: |
              INSERT INTO saas_migration_status (file_id, dossier_id, status)
              SELECT id, dossier_id, 'MIGRATION_REQUIRED'
              FROM file;
||||
Loading…
x
Reference in New Issue
Block a user