migration-hotfix: remove duplicate entries from entityLog if present #647
@ -6,6 +6,13 @@ plugins {
|
||||
val springBootStarterVersion = "3.1.5"
|
||||
val springCloudVersion = "4.0.4"
|
||||
|
||||
configurations {
	all {
		// Force a single logging backend project-wide: drop JCL, the Log4j2
		// starter and the legacy internal logging module so they cannot clash
		// with the logging setup pulled in by the Spring Boot starters.
		exclude(group = "commons-logging", module = "commons-logging")
		exclude(group = "org.springframework.boot", module = "spring-boot-starter-log4j2")
		exclude(group = "com.iqser.red.commons", module = "logging-commons")
	}
}
|
||||
dependencies {
|
||||
api(project(":persistence-service-shared-api-v1"))
|
||||
api(project(":persistence-service-shared-mongo-v1"))
|
||||
|
||||
@ -1,8 +1,11 @@
|
||||
package com.iqser.red.service.persistence.management.v1.processor.migration;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
@ -13,6 +16,7 @@ import com.iqser.red.service.persistence.management.v1.processor.service.persist
|
||||
import com.iqser.red.service.persistence.management.v1.processor.service.persistence.repository.FileRepository;
|
||||
import com.iqser.red.service.persistence.management.v1.processor.utils.StorageIdUtils;
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.EntityLog;
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.EntityLogEntry;
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.dossiertemplate.dossier.file.FileType;
|
||||
import com.iqser.red.storage.commons.exception.StorageException;
|
||||
import com.iqser.red.storage.commons.exception.StorageObjectDoesNotExist;
|
||||
@ -49,9 +53,10 @@ public class StorageToMongoCopyService {
|
||||
log.info("Reading dossier {} file {} from storage", dossierFile.dossierId, dossierFile.fileId);
|
||||
Optional<EntityLog> entityLogFromStorage = getEntityLogFromStorageForMigration(dossierFile.dossierId, dossierFile.fileId);
|
||||
if (entityLogFromStorage.isPresent()) {
|
||||
EntityLog entityLog = removeDuplicates(entityLogFromStorage.get());
|
||||
log.info("File found, now saving in mongodb");
|
||||
fileManagementStorageService.deleteEntityLog(dossierFile.dossierId, dossierFile.fileId);
|
||||
fileManagementStorageService.insertEntityLog(dossierFile.dossierId, dossierFile.fileId, entityLogFromStorage.get());
|
||||
fileManagementStorageService.insertEntityLog(dossierFile.dossierId, dossierFile.fileId, entityLog);
|
||||
log.info("Deleting old file from storage");
|
||||
fileManagementStorageService.deleteObject(dossierFile.dossierId, dossierFile.fileId, FileType.ENTITY_LOG);
|
||||
}
|
||||
@ -59,6 +64,28 @@ public class StorageToMongoCopyService {
|
||||
}
|
||||
|
||||
|
||||
// shouldn't have happened, but it seems there are a few rare cases at knoell prod stack. We can't get any example files though.
|
||||
protected static EntityLog removeDuplicates(EntityLog entityLog) {
|
||||
|
||||
HashMap<String, EntityLogEntry> existingAnnotationIds = new HashMap<>();
|
||||
|
||||
int size = entityLog.getEntityLogEntry().size();
|
||||
List<EntityLogEntry> cleanedEntries = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
EntityLogEntry entry = entityLog.getEntityLogEntry()
|
||||
.get(i);
|
||||
if (!existingAnnotationIds.containsKey(entry.getId())) {
|
||||
cleanedEntries.add(entry);
|
||||
existingAnnotationIds.put(entry.getId(), entry);
|
||||
} else {
|
||||
log.warn("Duplicate entry found for id {} \nExisting: {}\n Duplicate: {}", entry.getId(), existingAnnotationIds.get(entry.getId()), entry);
|
||||
}
|
||||
}
|
||||
entityLog.setEntityLogEntry(cleanedEntries);
|
||||
return entityLog;
|
||||
}
|
||||
|
||||
|
||||
private Optional<EntityLog> getEntityLogFromStorageForMigration(String dossierId, String fileId) {
|
||||
|
||||
try {
|
||||
|
||||
@ -0,0 +1,48 @@
|
||||
package com.iqser.red.service.persistence.management.v1.processor.migration;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.*;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.EntityLog;
|
||||
import com.iqser.red.service.persistence.service.v1.api.shared.model.analysislog.entitylog.EntityLogEntry;
|
||||
|
||||
class StorageToMongoCopyServiceTest {
|
||||
|
||||
@Test
|
||||
public void testDeDuplication() {
|
||||
|
||||
EntityLog entityLog = new EntityLog();
|
||||
List<EntityLogEntry> entityLogEntries = new ArrayList<>();
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("1").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("1").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("1").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("2").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("3").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("4").build());
|
||||
entityLog.setEntityLogEntry(entityLogEntries);
|
||||
|
||||
EntityLog cleanedEntityLog = StorageToMongoCopyService.removeDuplicates(entityLog);
|
||||
assertEquals(4, cleanedEntityLog.getEntityLogEntry().size());
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testDeDuplicationWithoutDuplicates() {
|
||||
|
||||
EntityLog entityLog = new EntityLog();
|
||||
List<EntityLogEntry> entityLogEntries = new ArrayList<>();
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("1").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("2").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("3").build());
|
||||
entityLogEntries.add(EntityLogEntry.builder().id("4").build());
|
||||
entityLog.setEntityLogEntry(entityLogEntries);
|
||||
|
||||
EntityLog cleanedEntityLog = StorageToMongoCopyService.removeDuplicates(entityLog);
|
||||
assertEquals(4, cleanedEntityLog.getEntityLogEntry().size());
|
||||
}
|
||||
|
||||
}
|
||||
Loading…
x
Reference in New Issue
Block a user