Merge branch 'RED-7384-fp' into 'master'

RED-7384: migration fixes forward port

Closes RED-7384

See merge request redactmanager/persistence-service!420
This commit is contained in:
Kilian Schüttler 2024-04-04 15:44:38 +02:00
commit 6525e0c2bc
12 changed files with 258 additions and 56 deletions

View File

@ -6,7 +6,7 @@ plugins {
jacoco
}
val redactionServiceVersion by rootProject.extra { "4.262.0" }
val redactionServiceVersion by rootProject.extra { "4.290.0" }
val pdftronRedactionServiceVersion by rootProject.extra { "4.48.0" }
val redactionReportServiceVersion by rootProject.extra { "4.47.0" }
val searchServiceVersion by rootProject.extra { "2.71.0" }
@ -49,7 +49,6 @@ tasks.named<Test>("test") {
tasks.test {
finalizedBy(tasks.jacocoTestReport) // report is always generated after tests run
}
tasks.jacocoTestReport {

View File

@ -40,10 +40,10 @@ public class CustomPermissionService {
public void syncAllCustomPermissions() {
log.info("Syncing all custom permissions");
log.debug("Syncing all custom permissions");
var targetObjects = getAllSupportedTargetObjects();
targetObjects.forEach(this::applyCustomPermissions);
log.info("All custom permissions synced");
log.debug("All custom permissions synced");
}

View File

@ -0,0 +1,83 @@
package com.iqser.red.service.persistence.management.v1.processor.migration;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import org.springframework.stereotype.Service;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.iqser.red.service.persistence.management.v1.processor.entity.annotations.AnnotationEntityId;
import com.iqser.red.service.persistence.management.v1.processor.entity.annotations.ManualRedactionEntryEntity;
import com.iqser.red.service.persistence.management.v1.processor.model.ManualChangesQueryOptions;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.annotations.AddRedactionPersistenceService;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.annotations.RemoveRedactionPersistenceService;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.annotations.ResizeRedactionPersistenceService;
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.type.DictionaryEntryType;
import lombok.RequiredArgsConstructor;
@Service
@RequiredArgsConstructor
public class SaasMigrationManualChangesUpdateService {

    private final AddRedactionPersistenceService addRedactionPersistenceService;

    // Murmur3-128 is a stable hash, so the derived secondary id is reproducible for the same source id.
    private final HashFunction hashFunction = Hashing.murmur3_128();

    /**
     * Converts every unprocessed manual "add" entry of the given file that requested a dictionary
     * change into a plain local change, while inserting a copy under a derived secondary id so the
     * UI can still show the pending dictionary request.
     *
     * @param fileId id of the file whose unprocessed manual additions should be converted
     */
    public void convertUnprocessedAddToDictionariesToLocalChanges(String fileId) {
        var unprocessedManualAdds = addRedactionPersistenceService.findEntriesByFileIdAndOptions(fileId, ManualChangesQueryOptions.unprocessedOnly());
        for (var unprocessedManualAdd : unprocessedManualAdds) {
            // Null-safe enum comparison: entries already converted by an earlier run carry a null
            // dictionaryEntryType (it is nulled out below), and equals() on the getter would then NPE.
            if (unprocessedManualAdd.getDictionaryEntryType() != DictionaryEntryType.ENTRY) {
                continue;
            }
            if (unprocessedManualAdd.isAddToDictionary() || unprocessedManualAdd.isAddToAllDossiers()) {
                // copy pending dict change to a new one with a different id. Can't reuse the same one, as it's the primary key of the table.
                // It has no functionality, it's only there such that the UI can show a pending change.
                ManualRedactionEntryEntity pendingDictAdd = ManualRedactionEntryEntity.builder()
                        .id(buildSecondaryId(unprocessedManualAdd.getId(), fileId))
                        .user(unprocessedManualAdd.getUser())
                        .typeId(unprocessedManualAdd.getTypeId())
                        .value(unprocessedManualAdd.getValue())
                        .reason(unprocessedManualAdd.getReason())
                        .legalBasis(unprocessedManualAdd.getLegalBasis())
                        .section(unprocessedManualAdd.getSection())
                        .rectangle(unprocessedManualAdd.isRectangle())
                        .addToDictionary(unprocessedManualAdd.isAddToDictionary())
                        .addToAllDossiers(unprocessedManualAdd.isAddToAllDossiers())
                        .dictionaryEntryType(DictionaryEntryType.ENTRY)
                        .requestDate(unprocessedManualAdd.getRequestDate())
                        .positions(new ArrayList<>(unprocessedManualAdd.getPositions())) // copy to new List
                        .fileStatus(unprocessedManualAdd.getFileStatus())
                        .textBefore(unprocessedManualAdd.getTextBefore())
                        .textAfter(unprocessedManualAdd.getTextAfter())
                        .sourceId(unprocessedManualAdd.getSourceId())
                        .typeIdsOfModifiedDictionaries(unprocessedManualAdd.getTypeIdsOfModifiedDictionaries())
                        .build();
                addRedactionPersistenceService.update(pendingDictAdd);
                // change existing dict add to unprocessed manual add. ID must match with prior entry, such that other unprocessed manual changes may be applied to it.
                unprocessedManualAdd.setAddToDictionary(false);
                unprocessedManualAdd.setAddToAllDossiers(false);
                unprocessedManualAdd.setLegalBasis("");
                unprocessedManualAdd.setTypeIdsOfModifiedDictionaries(Collections.emptySet());
                unprocessedManualAdd.setDictionaryEntryType(null);
                addRedactionPersistenceService.update(unprocessedManualAdd);
            }
        }
    }

    /**
     * Builds a deterministic secondary id for the UI-only copy by hashing the original
     * annotation id; the fileId part of the composite key is kept unchanged.
     */
    private AnnotationEntityId buildSecondaryId(AnnotationEntityId annotationEntityId, String fileId) {
        return new AnnotationEntityId(hashFunction.hashString(annotationEntityId.getAnnotationId(), StandardCharsets.UTF_8).toString(), fileId);
    }
}

View File

@ -1,24 +1,21 @@
package com.iqser.red.service.persistence.management.v1.processor.migration;
import static com.iqser.red.service.persistence.management.v1.processor.configuration.MessagingConfiguration.MIGRATION_QUEUE;
import static com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileType.ENTITY_LOG;
import static com.knecon.fforesight.service.layoutparser.internal.api.queue.LayoutParsingQueueNames.LAYOUT_PARSING_REQUEST_QUEUE;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.stereotype.Service;
import com.iqser.red.service.persistence.management.v1.processor.entity.annotations.AnnotationEntityId;
import com.iqser.red.service.persistence.management.v1.processor.entity.dossier.FileEntity;
import com.iqser.red.service.persistence.management.v1.processor.entity.migration.SaasMigrationStatusEntity;
import com.iqser.red.service.persistence.management.v1.processor.exception.InternalServerErrorException;
import com.iqser.red.service.persistence.management.v1.processor.exception.NotFoundException;
import com.iqser.red.service.persistence.management.v1.processor.model.ManualChangesQueryOptions;
import com.iqser.red.service.persistence.management.v1.processor.service.CommentService;
import com.iqser.red.service.persistence.management.v1.processor.service.DossierService;
import com.iqser.red.service.persistence.management.v1.processor.service.IndexingService;
import com.iqser.red.service.persistence.management.v1.processor.service.job.AutomaticAnalysisJob;
@ -69,7 +66,9 @@ public class SaasMigrationService implements TenantSyncService {
SaasAnnotationIdMigrationService saasAnnotationIdMigrationService;
UncompressedFilesMigrationService uncompressedFilesMigrationService;
ManualRedactionService manualRedactionService;
CommentService commentService;
RankDeDuplicationService rankDeDuplicationService;
SaasMigrationManualChangesUpdateService saasMigrationManualChangesUpdateService;
@Override
@ -91,48 +90,29 @@ public class SaasMigrationService implements TenantSyncService {
log.info("Finished uncompressed files migration ...");
rankDeDuplicationService.deduplicate();
int numberOfFiles = 0;
var dossiers = dossierService.getAllDossiers()
.stream()
.filter(dossier -> dossier.getHardDeletedTime() == null)
.toList();
for (var dossier : dossiers) {
var files = fileStatusPersistenceService.getStatusesForDossier(dossier.getId())
.stream()
.filter(file -> file.getHardDeletedTime() == null)
.toList();
var files = saasMigrationStatusPersistenceService.findAll();
var migrationStati = saasMigrationStatusPersistenceService.findAll()
.stream()
.collect(Collectors.toMap(SaasMigrationStatusEntity::getFileId, SaasMigrationStatusEntity::getStatus));
for (var file : files) {
for (var file : files) {
if (notExistsOrError(file, migrationStati)) {
// delete NER_ENTITIES since offsets depend on old document structure.
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossier.getId(), file.getId(), FileType.NER_ENTITIES));
saasMigrationStatusPersistenceService.createMigrationRequiredStatus(dossier.getId(), file.getId());
var layoutParsingRequest = layoutParsingRequestFactory.build(dossier.getId(), file.getId(), false);
rabbitTemplate.convertAndSend(LAYOUT_PARSING_REQUEST_QUEUE, layoutParsingRequest);
numberOfFiles++;
} else {
log.info("Skipping file with id {} and dossier {}, since it is already migrated or is currently being migrated!", file.getId(), file.getDossierId());
}
if (!file.getStatus().equals(SaasMigrationStatus.MIGRATION_REQUIRED)) {
log.info("Skipping {} for tenant {} since migration status is {}", file.getFileId(), TenantContext.getTenantId(), file.getStatus());
continue;
}
// delete NER_ENTITIES since offsets depend on old document structure.
storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(file.getDossierId(), file.getFileId(), FileType.NER_ENTITIES));
var layoutParsingRequest = layoutParsingRequestFactory.build(file.getDossierId(), file.getFileId(), false);
rabbitTemplate.convertAndSend(LAYOUT_PARSING_REQUEST_QUEUE, layoutParsingRequest);
numberOfFiles++;
}
log.info("Added {} documents for tenant {} to Layout-Parsing queue for saas migration", numberOfFiles, TenantContext.getTenantId());
if (numberOfFiles == 0) {
finalizeMigration();
}
}
private boolean notExistsOrError(FileEntity file, Map<String, SaasMigrationStatus> migrationStati) {
return migrationStati.getOrDefault(file.getId(), SaasMigrationStatus.ERROR).equals(SaasMigrationStatus.ERROR);
}
@ -161,24 +141,27 @@ public class SaasMigrationService implements TenantSyncService {
return;
}
log.info("Layout Parsing finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
saasMigrationStatusPersistenceService.updateStatus(fileId, SaasMigrationStatus.DOCUMENT_FILES_MIGRATED);
if (fileStatusPersistenceService.getStatus(fileId).getWorkflowStatus().equals(WorkflowStatus.APPROVED)) {
manualRedactionProviderService.convertUnprocessedAddToDictionariesToLocalChanges(fileId);
saasMigrationManualChangesUpdateService.convertUnprocessedAddToDictionariesToLocalChanges(fileId);
}
try {
indexingService.reindex(dossierId, Set.of(fileId), false);
String dossierTemplateId = dossierService.getDossierById(dossierId).getDossierTemplateId();
rabbitTemplate.convertAndSend(MIGRATION_QUEUE,
MigrationRequest.builder()
.dossierTemplateId(dossierTemplateId)
.dossierId(dossierId)
.fileId(fileId)
.fileIsApproved(fileStatusPersistenceService.getStatus(fileId).getWorkflowStatus().equals(WorkflowStatus.APPROVED))
.manualRedactions(manualRedactionProviderService.getManualRedactions(fileId, ManualChangesQueryOptions.allWithoutDeleted()))
.entitiesWithComments(commentService.getCommentCounts(fileId).keySet())
.build());
log.info("Layout Parsing finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
} catch (Exception e) {
log.error("Queuing of entityLog migration failed with {}", e.getMessage());
saasMigrationStatusPersistenceService.updateErrorStatus(fileId, String.format("Queuing of entityLog migration failed with %s", e.getMessage()));
@ -211,8 +194,9 @@ public class SaasMigrationService implements TenantSyncService {
private boolean entityLogMigrationFilesExist(String dossierId, String fileId) {
return storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, ENTITY_LOG))
&& storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.MIGRATED_IDS));
return storageService.objectExists(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, FileType.ENTITY_LOG)) && storageService.objectExists(
TenantContext.getTenantId(),
StorageIdUtils.getStorageId(dossierId, fileId, FileType.MIGRATED_IDS));
}
@ -239,26 +223,38 @@ public class SaasMigrationService implements TenantSyncService {
List<ManualRedactionEntry> manualRedactionEntriesToAdd = migratedIds.getManualRedactionEntriesToAdd();
int count = addManualRedactionEntries(manualRedactionEntriesToAdd);
log.info("Added {} additional manual entries.", count);
deleteSectionGrid(dossierId, fileId);
deleteSectionGridAndNerEntitiesFiles(dossierId, fileId);
saasMigrationStatusPersistenceService.updateStatus(fileId, SaasMigrationStatus.FINISHED);
log.info("AnnotationIds migration finished for saas migration for tenant {} dossier {} and file {}", TenantContext.getTenantId(), dossierId, fileId);
finalizeMigration();
finalizeMigration(); // AutomaticAnalysisJob should be re-enabled by re-starting the persistence service pod after a rule change
}
private void deleteSectionGrid(String dossierId, String fileId) {
/**
 * Deletes the SECTION_GRID and NER_ENTITIES storage objects for the given file.
 * Both depend on the pre-migration document structure; a missing object is
 * expected and only logged.
 */
private void deleteSectionGridAndNerEntitiesFiles(String dossierId, String fileId) {
    deleteIgnoringMissing(dossierId, fileId, FileType.SECTION_GRID, "sectiongrid");
    deleteIgnoringMissing(dossierId, fileId, FileType.NER_ENTITIES, "ner entities file");
}

// Deletes one storage object for the file, treating "already gone" as success.
private void deleteIgnoringMissing(String dossierId, String fileId, FileType fileType, String description) {
    try {
        storageService.deleteObject(TenantContext.getTenantId(), StorageIdUtils.getStorageId(dossierId, fileId, fileType));
    } catch (StorageObjectDoesNotExist e) {
        log.info("No {} found for {}, {}, ignoring....", description, dossierId, fileId);
    }
}
/**
 * Persists the given manual redaction entries as forced additions, truncating
 * over-long section labels beforehand.
 *
 * @param manualRedactionEntriesToAdd entries to persist; section fields may be shortened in place
 * @return the count reported by {@code manualRedactionService}
 */
private int addManualRedactionEntries(List<ManualRedactionEntry> manualRedactionEntriesToAdd) {
    final int maxSectionLength = 254; // presumably the section column limit — TODO confirm against schema
    for (ManualRedactionEntry entry : manualRedactionEntriesToAdd) {
        String section = entry.getSection();
        if (section != null && section.length() > maxSectionLength) {
            entry.setSection(section.substring(0, maxSectionLength));
        }
    }
    return manualRedactionService.addManualRedactionEntries(manualRedactionEntriesToAdd, true);
}
@ -300,7 +296,7 @@ public class SaasMigrationService implements TenantSyncService {
private void finalizeMigration() {
if (saasMigrationStatusPersistenceService.countByStatus(SaasMigrationStatus.FINISHED) == saasMigrationStatusPersistenceService.countAll()) {
automaticAnalysisJob.startForTenant(TenantContext.getTenantId());
// automaticAnalysisJob.startForTenant(TenantContext.getTenantId());
tenantProvider.updateDetails(TenantContext.getTenantId(), UpdateDetailsRequest.builder().key("persistence-service-ready").value(true).build());
log.info("Saas migration finished for tenantId {}, re-enabled scheduler", TenantContext.getTenantId());
}

View File

@ -0,0 +1,89 @@
package com.iqser.red.service.persistence.management.v1.processor.migration.migrations;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.iqser.red.service.persistence.management.v1.processor.entity.dossier.FileEntity;
import com.iqser.red.service.persistence.management.v1.processor.migration.Migration;
import com.iqser.red.service.persistence.management.v1.processor.service.FileManagementStorageService;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.FileStatusPersistenceService;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.Position;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.imported.ImportedRedaction;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.imported.ImportedRedactions;
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.imported.ImportedRedactionsPerPage;
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileType;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@Setter
@Service
public class MigrateImportedRedactionsFiles17 extends Migration {

    @Autowired
    private FileStatusPersistenceService fileStatusPersistenceService;

    @Autowired
    private FileManagementStorageService fileManagementStorageService;

    private static final String NAME = "Migrating the ImportedRedaction files to the new structure";
    private static final long VERSION = 17;

    public MigrateImportedRedactionsFiles17() {
        super(NAME, VERSION);
    }

    /**
     * Rewrites each stored IMPORTED_REDACTIONS object from the legacy
     * redactionlog model into the new page-keyed {@code ImportedRedactionsPerPage} structure.
     */
    @Override
    protected void migrate() {
        for (FileEntity file : fileStatusPersistenceService.getAllFiles()) {
            if (!fileManagementStorageService.objectExists(file.getDossierId(), file.getId(), FileType.IMPORTED_REDACTIONS)) {
                continue;
            }
            var legacyRedactions = fileManagementStorageService.getImportedRedactions(file.getDossierId(), file.getId());
            // NOTE(review): the old object is deleted before the replacement is written; a failure in
            // between would lose the data — confirm whether the storage layer requires delete-before-store.
            fileManagementStorageService.deleteObject(file.getDossierId(), file.getId(), FileType.IMPORTED_REDACTIONS);
            Map<Integer, List<ImportedRedaction>> pageToRedactions = legacyRedactions.getImportedRedactions()
                    .entrySet()
                    .stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, entry -> convertPage(entry.getValue())));
            fileManagementStorageService.storeJSONObject(file.getDossierId(), file.getId(), FileType.IMPORTED_REDACTIONS, new ImportedRedactionsPerPage(pageToRedactions));
        }
    }

    // Converts all legacy redactions of one page into the new model.
    private List<ImportedRedaction> convertPage(
            List<com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedaction> legacyPageRedactions) {
        return legacyPageRedactions.stream().map(this::toImportedRedaction).toList();
    }

    // Maps a single legacy redaction: same id, rectangles converted to Position records.
    private ImportedRedaction toImportedRedaction(com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedaction importedRedaction) {
        List<Position> positions = mapPositions(importedRedaction);
        return ImportedRedaction.builder().id(importedRedaction.getId()).positions(positions).build();
    }

    // Flattens each legacy rectangle (top-left corner, width, height, page) into a Position.
    private static List<Position> mapPositions(com.iqser.red.service.persistence.service.v1.api.shared.model.redactionlog.imported.ImportedRedaction importedRedaction) {
        return importedRedaction.getPositions()
                .stream()
                .map(rect -> new Position(rect.getTopLeft().getX(), rect.getTopLeft().getY(), rect.getWidth(), rect.getHeight(), rect.getPage()))
                .toList();
    }
}

View File

@ -57,7 +57,7 @@ public class KeyCloakUserSyncService {
// remove KC users, what's left is users that are in redaction but no longer in KC
redactionObjectsUserIds.removeAll(allUserIds);
log.info("Performing user sync/cleanup for ids: {}", redactionObjectsUserIds);
log.debug("Performing user sync/cleanup for ids: {}", redactionObjectsUserIds);
redactionObjectsUserIds.forEach(removedUser -> alLDossiers.forEach(dossier -> this.userService.updateDossierUsers(removedUser,
UserService.UserRemovalModel.PERMANENT,

View File

@ -132,7 +132,8 @@ public class ReanalysisRequiredStatusService {
var fullAnalysisRequired = !rulesVersionMatches || !componentRulesVersionMatches || !legalBasisVersionMatches;
if (reanalysisRequired || fullAnalysisRequired) {
log.info(
"For file: {} analysis is required because -> ruleVersionMatches: {}/{}, componentRuleVersionMatches {}/{}, dictionaryVersionMatches: {}/{}, legalBasisVersionMatches: {}/{}, dossierDictionaryVersionMatches: {}/{}",
"For file: {}-{} analysis is required because -> ruleVersionMatches: {}/{}, componentRuleVersionMatches {}/{}, dictionaryVersionMatches: {}/{}, legalBasisVersionMatches: {}/{}, dossierDictionaryVersionMatches: {}/{}",
fileStatus.getId(),
fileStatus.getFilename(),
fileStatus.getRulesVersion(),
dossierTemplateVersions.getOrDefault(RULES, -1L),

View File

@ -12,6 +12,7 @@ import org.springframework.stereotype.Service;
import com.iqser.red.service.persistence.management.v1.processor.configuration.MessagingConfiguration;
import com.iqser.red.service.persistence.management.v1.processor.service.FileStatusService;
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.SaasMigrationStatusPersistenceService;
import com.iqser.red.service.persistence.management.v1.processor.settings.FileManagementServiceSettings;
import com.iqser.red.service.persistence.management.v1.processor.utils.TenantUtils;
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileModel;
@ -34,6 +35,7 @@ public class AutomaticAnalysisJob implements Job {
private final FileStatusService fileStatusService;
private final TenantProvider tenantProvider;
private final ObservationRegistry observationRegistry;
private final SaasMigrationStatusPersistenceService saasMigrationStatusPersistenceService;
@Setter
private boolean schedulingStopped;
@ -52,13 +54,22 @@ public class AutomaticAnalysisJob implements Job {
tenantProvider.getTenants()
.forEach(tenant -> {
if (!TenantUtils.isTenantReadyForPersistence(tenant) || stoppedTenants.contains(tenant.getTenantId())) {
if (!TenantUtils.isTenantReadyForPersistence(tenant)) {
log.info("[Tenant:{}] Skipping scheduling since tenant is not ready.", tenant.getTenantId());
return;
}
if (stoppedTenants.contains(tenant.getTenantId())) {
log.info("[Tenant:{}] Skipping scheduling as automatic reanalysis is disabled for tenant.", tenant.getTenantId());
return;
}
TenantContext.setTenantId(tenant.getTenantId());
var redactionQueueInfo = amqpAdmin.getQueueInfo(MessagingConfiguration.REDACTION_QUEUE);
if (!saasMigrationStatusPersistenceService.migrationFinishedForTenant()) {
log.info("[Tenant:{}] Skipping scheduling as there are files that require migration.", tenant.getTenantId());
return;
}var redactionQueueInfo = amqpAdmin.getQueueInfo(MessagingConfiguration.REDACTION_QUEUE);
if (redactionQueueInfo != null) {
log.debug("[Tenant:{}] Checking queue status to see if background analysis can happen. Currently {} holds {} elements and has {} consumers",
tenant.getTenantId(),
@ -107,10 +118,10 @@ public class AutomaticAnalysisJob implements Job {
.observe(() -> {
if (file.isFullAnalysisRequired()) {
log.info("[Tenant:{}] Queued file: {} for automatic full analysis! ", TenantContext.getTenantId(), file.getFilename());
log.info("[Tenant:{}] Queued file: {} for automatic full analysis! ", TenantContext.getTenantId(), file.getId());
fileStatusService.setStatusFullReprocess(file.getDossierId(), file.getId(), false, false);
} else if (file.isReanalysisRequired()) {
log.info("[Tenant:{}] Queued file: {} for automatic reanalysis! ", TenantContext.getTenantId(), file.getFilename());
log.info("[Tenant:{}] Queued file: {} for automatic reanalysis! ", TenantContext.getTenantId(), file.getId());
fileStatusService.setStatusReprocess(file.getDossierId(), file.getId(), false);
}
});
@ -125,12 +136,14 @@ public class AutomaticAnalysisJob implements Job {
/**
 * Disables automatic analysis scheduling for the given tenant by recording it in
 * {@code stoppedTenants}, which the scheduling loop consults before queuing work.
 */
public void stopForTenant(String tenantId) {
log.info("Stopping automatic analysis for tenant {}", tenantId);
stoppedTenants.add(tenantId);
}
/**
 * Re-enables automatic analysis scheduling for the given tenant by removing it
 * from {@code stoppedTenants}.
 */
public void startForTenant(String tenantId) {
log.info("Starting automatic analysis for tenant {}", tenantId);
stoppedTenants.remove(tenantId);
}

View File

@ -42,6 +42,11 @@ public class SaasMigrationStatusPersistenceService {
return migrationStatusOptional.isPresent() && migrationStatusOptional.get().getStatus() != SaasMigrationStatus.FINISHED;
}
/**
 * Returns true when no migration status entry of the current tenant is in a
 * non-terminal state. Note that ERROR counts as terminal here, so a tenant with
 * only FINISHED/ERROR entries is considered done. The repository method, despite
 * its "findAll…" name, returns a COUNT of non-terminal entries.
 */
public boolean migrationFinishedForTenant() {
return saasMigrationStatusRepository.findAllWhereStatusNotFinishedAndNotError() == 0;
}
@Transactional
public void createMigrationRequiredStatus(String dossierId, String fileId) {

View File

@ -37,4 +37,8 @@ public interface SaasMigrationStatusRepository extends JpaRepository<SaasMigrati
@Query("select count(*) from SaasMigrationStatusEntity")
int countAll();
/**
 * Counts entries whose migration status is non-terminal (neither FINISHED nor ERROR).
 * NOTE: the "findAll…" name is kept for source compatibility with existing callers,
 * but the method returns a count. {@code count(e)} is used instead of the
 * Hibernate-specific {@code count(*)} for portable JPQL.
 */
@Query("select count(e) from SaasMigrationStatusEntity e where e.status != 'FINISHED' and e.status != 'ERROR'")
int findAllWhereStatusNotFinishedAndNotError();
}

View File

@ -193,3 +193,5 @@ databaseChangeLog:
file: db/changelog/tenant/122-add-legal-basis-variables-to-recategorize.yaml
- include:
file: db/changelog/tenant/123-add-value-to-recategorize.yaml
- include:
file: db/changelog/tenant/124-create-migration-required-status-for-each-present-file.yaml

View File

@ -0,0 +1,10 @@
databaseChangeLog:
- changeSet:
id: create-migration-required-status-for-each-present-file
author: Kilian
changes:
- sql:
sql: |
INSERT INTO saas_migration_status (file_id, dossier_id, status)
SELECT id, dossier_id, 'MIGRATION_REQUIRED'
FROM file;