diff --git a/Experimental/nbproject/project.xml b/Experimental/nbproject/project.xml index 26eeafb76c63bc86fa1b5fd9da212b914f444108..66e23443167b927c71fb2522aff132d55b0e276f 100644 --- a/Experimental/nbproject/project.xml +++ b/Experimental/nbproject/project.xml @@ -168,6 +168,7 @@ </module-dependencies> <public-packages> <package>org.sleuthkit.autopsy.experimental.autoingest</package> + <package>org.sleuthkit.autopsy.experimental.cleanup</package> <package>org.sleuthkit.autopsy.experimental.configuration</package> </public-packages> <class-path-extension> diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestManager.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestManager.java index d4f789a8619996480583c6a88d3fcaed3e469659..4a7ca274161ac9f14152cb562d8da0195948d75d 100644 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestManager.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/AutoIngestManager.java @@ -1,7 +1,7 @@ /* * Autopsy Forensic Browser * - * Copyright 2016-2021 Basis Technology Corp. + * Copyright 2016-2022 Basis Technology Corp. * Contact: carrier <at> sleuthkit <dot> org * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,12 +37,15 @@ import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Observable; import java.util.Set; import java.util.UUID; @@ -73,6 +76,7 @@ import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult; import static org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorCallback.DataSourceProcessorResult.CRITICAL_ERRORS; import org.sleuthkit.autopsy.corecomponentinterfaces.DataSourceProcessorProgressMonitor; +import org.sleuthkit.autopsy.coreutils.FileUtil; import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.coreutils.NetworkUtils; import org.sleuthkit.autopsy.coreutils.ThreadUtils; @@ -96,6 +100,7 @@ import org.sleuthkit.autopsy.datasourceprocessors.DataSourceProcessorUtility; import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJob.AutoIngestJobException; import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestNodeControlEvent.ControlEventType; +import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup; import org.sleuthkit.autopsy.ingest.IngestJob; import org.sleuthkit.autopsy.ingest.IngestJob.CancellationReason; import org.sleuthkit.autopsy.ingest.IngestJobSettings; @@ -110,6 +115,7 @@ import org.sleuthkit.datamodel.SleuthkitCase; import org.sleuthkit.datamodel.TskCoreException; import org.sleuthkit.autopsy.keywordsearch.KeywordSearchJobSettings; +import org.sleuthkit.autopsy.progress.ProgressIndicator; /** * An auto ingest manager is responsible for processing auto ingest jobs defined @@ -219,6 +225,7 @@ private AutoIngestManager() { inputScanExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat(INPUT_SCAN_THREAD_NAME).build()); jobProcessingExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat(AUTO_INGEST_THREAD_NAME).build()); jobStatusPublishingExecutor = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().setNameFormat(JOB_STATUS_PUBLISHING_THREAD_NAME).build()); 
+        hostNamesToRunningJobs = new ConcurrentHashMap<>();
         hostNamesToLastMsgTime = new ConcurrentHashMap<>();
         jobsLock = new Object();
@@ -256,8 +263,9 @@ void startUp() throws AutoIngestManagerException {
         rootOutputDirectory = Paths.get(AutoIngestUserPreferences.getAutoModeResultsFolder());
         inputScanSchedulingExecutor.scheduleWithFixedDelay(new InputDirScanSchedulingTask(), 0, AutoIngestUserPreferences.getMinutesOfInputScanInterval(), TimeUnit.MINUTES);
         jobProcessingTask = new JobProcessingTask();
-        jobProcessingTaskFuture = jobProcessingExecutor.submit(jobProcessingTask);
+        jobProcessingTaskFuture = jobProcessingExecutor.submit(jobProcessingTask);
         jobStatusPublishingExecutor.scheduleWithFixedDelay(new PeriodicJobStatusEventTask(), JOB_STATUS_EVENT_INTERVAL_SECONDS, JOB_STATUS_EVENT_INTERVAL_SECONDS, TimeUnit.SECONDS);
+        eventPublisher.addSubscriber(EVENT_LIST, instance);
         state = State.RUNNING;
@@ -1947,6 +1955,12 @@ private void processJobs() throws CoordinationServiceException, SharedConfigurat
                     processJob();
                 } finally {
                     manifestLock.release();
+
+                    // force garbage collection to release file handles
+                    System.gc();
+
+                    // perform optional input and output directory cleanup
+                    cleanup();
                 }
                 if (jobProcessingTaskFuture.isCancelled()) {
                     return;
@@ -1959,6 +1973,110 @@ private void processJobs() throws CoordinationServiceException, SharedConfigurat
            }
        }

+        private void cleanup() {
+            try {
+                // discover the registered implementations of automated cleanup
+                Collection<? extends AutoIngestCleanup> cleanups
+                        = Lookup.getDefault().lookupAll(AutoIngestCleanup.class);
+
+                if (!cleanups.isEmpty()) {
+                    AutoIngestCleanup cleanup = cleanups.iterator().next();
+
+                    sysLogger.log(Level.INFO, "CleanupSchedulingTask - trying to get ingest job lock");
+                    // NOTE1: Make a copy of the completed jobs list. There is no need to hold the jobs
+                    // lock during the entire, very lengthy cleanup operation. The jobs lock is also used
+                    // to process incoming messages from other nodes, so we don't want to hold it for hours.
+
+                    // NOTE2: Create a map of cases and data sources, so that we only attempt to clean
+                    // each case once. Otherwise, if there are many completed jobs for a case
+                    // that has jobs being processed by other AINs, we will be stuck attempting to clean
+                    // that case over and over again, unable to get locks.
+                    Map<Path, List<Path>> casesToJobsMap = new HashMap<>();
+                    synchronized (jobsLock) {
+                        for (AutoIngestJob job : completedJobs) {
+                            Path casePath = job.getCaseDirectoryPath();
+                            Path dsPath = job.getManifest().getDataSourcePath();
+
+                            List<Path> list = casesToJobsMap.get(casePath);
+                            if (list == null) {
+                                list = new ArrayList<>();
+                                casesToJobsMap.put(casePath, list);
+                            }
+                            list.add(dsPath);
+                        }
+                    }
+
+                    sysLogger.log(Level.INFO, "CleanupSchedulingTask - got ingest job lock");
+                    String deletedCaseName = "";
+                    for (Map.Entry<Path, List<Path>> caseData : casesToJobsMap.entrySet()) {
+                        // do cleanup for each case and data source of the case
+                        Path casePath = caseData.getKey();
+                        boolean success = true;
+                        if (casePath.toFile().exists()) {
+                            sysLogger.log(Level.INFO, "Cleaning up case {0} ", casePath.toString());
+                            success = cleanup.runCleanupTask(casePath, AutoIngestCleanup.DeleteOptions.DELETE_INPUT_AND_OUTPUT, new DoNothingProgressIndicator());
+                        } else {
+                            // The case directory has been deleted. Make sure the data source is deleted as well,
+                            // because we will never be able to run automated cleanup on a case directory
+                            // that has been deleted.
+                            for (Path dsPath : caseData.getValue()) {
+                                File dsFile = dsPath.toFile();
+                                if (dsFile.exists()) {
+                                    sysLogger.log(Level.INFO, "Cleaning up data source {0} for deleted case {1}", new Object[]{dsPath.toString(), casePath.toString()});
+                                    if (!FileUtil.deleteFileDir(dsFile)) {
+                                        sysLogger.log(Level.SEVERE, String.format("Failed to delete data source file at %s ", dsPath.toString()));
+                                    }
+                                }
+                            }
+                        }
+
+                        if (success) {
+                            sysLogger.log(Level.INFO, "Cleanup task successfully completed for case: {0}", casePath.toString());
+                        } else {
+                            sysLogger.log(Level.WARNING, "Cleanup task failed for case: {0}", casePath.toString());
+                            continue;
+                        }
+
+                        // NOTE: the code below assumes that the case directory and all data sources are deleted
+                        // during cleanup. This may not be the case in future implementations of AutoIngestCleanup.
+
+                        // verify that the data sources and case directory have indeed been deleted
+                        for (Path dsPath : caseData.getValue()) {
+                            if (dsPath.toFile().exists()) {
+                                // data source has NOT been deleted
+                                sysLogger.log(Level.SEVERE, "Data source has not been deleted during cleanup: {0}", dsPath.toString());
+                            }
+                        }
+
+                        if (casePath.toFile().exists()) {
+                            // case output directory has NOT been deleted, or at least some contents of the
+                            // case directory remain
+                            sysLogger.log(Level.SEVERE, "Case directory has not been deleted during cleanup: {0}", casePath.toString());
+                        }
+
+                        deletedCaseName = casePath.toString();
+                    }
+
+                    if (!deletedCaseName.isEmpty()) {
+                        // Send a message that at least one case has been deleted. This message triggers an input directory
+                        // re-scan on other AINs, so only send one message after all cleanup is complete. The actual
+                        // case name is not relevant either and is not being tracked on the receiving side.
+                        final String name = deletedCaseName;
+                        new Thread(() -> {
+                            eventPublisher.publishRemotely(new AutoIngestCaseDeletedEvent(LOCAL_HOST_NAME, name,
+                                    getSystemUserNameProperty()));
+                        }).start();
+
+                        // trigger an input scan, which will update the ZK nodes and tables
+                        scanInputDirsNow();
+                    }
+                }
+
+            } catch (Exception ex) {
+                sysLogger.log(Level.SEVERE, "Unexpected exception in CleanupSchedulingTask", ex); //NON-NLS
+            }
+        }
+
         /**
          * Inspects the pending jobs queue, looking for the next job that is
          * ready for processing. If such a job is found, it is removed from the
@@ -3100,6 +3218,44 @@ private JobMetricsCollectionException(String message, Throwable cause) {
            }
        }
+    }
+
+    /**
+     * A progress monitor that does nothing.
+ */ + private class DoNothingProgressIndicator implements ProgressIndicator { + + @Override + public void start(String message, int totalWorkUnits) { + } + + @Override + public void start(String message) { + } + + @Override + public void switchToIndeterminate(String message) { + } + + @Override + public void switchToDeterminate(String message, int workUnitsCompleted, int totalWorkUnits) { + } + + @Override + public void progress(String message) { + } + + @Override + public void progress(int workUnitsCompleted) { + } + + @Override + public void progress(String message, int workUnitsCompleted) { + } + + @Override + public void finish() { + } } /** diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java index d51e03d350833be3af4689df4aeb4b5f14468e2a..bc78ea337e65708882021d76d4fc2e6499f61622 100755 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseAction.java @@ -23,6 +23,7 @@ import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; import org.sleuthkit.autopsy.progress.ProgressIndicator; +import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions; /** * An action that completely deletes one or more multi-user cases. Only the @@ -69,6 +70,6 @@ public void actionPerformed(ActionEvent event) { @Override DeleteCaseTask getTask(CaseNodeData caseNodeData, ProgressIndicator progress) { - return new DeleteCaseTask(caseNodeData, DeleteCaseTask.DeleteOptions.DELETE_CASE, progress); + return new DeleteCaseTask(caseNodeData, DeleteOptions.DELETE_CASE, progress); } } diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java index a3148fbf52fa728dd03ea8ea9d36df723bc01056..7d09401af3120f16c33414e50d41431580e376f4 100755 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAction.java @@ -22,7 +22,7 @@ import org.openide.util.NbBundle; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; -import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions; +import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions; import org.sleuthkit.autopsy.progress.ProgressIndicator; /** diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java index c773c465e018a60348a05c8beac287b7a38a68c4..d1053c0311baaeb55cfaf4bbf532084a879a8987 100755 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseInputAndOutputAction.java @@ -23,7 +23,7 @@ import org.openide.util.NbBundle.Messages; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; -import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions; +import 
org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions; import org.sleuthkit.autopsy.progress.ProgressIndicator; /** diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java index cd6f6f9418c0777755672edee1e954a1669ade18..7970edb93b562c974e30339d07452391b707a6ab 100755 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseOutputAction.java @@ -22,7 +22,7 @@ import org.openide.util.NbBundle; import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData; import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil; -import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask.DeleteOptions; +import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions; import org.sleuthkit.autopsy.progress.ProgressIndicator; /** diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java index e96d2cc5fdcd34e08dc07d6d61988af608d38de9..db9271e02b8e5d2848943f8ad8e39f5b45ae1313 100755 --- a/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java +++ b/Experimental/src/org/sleuthkit/autopsy/experimental/autoingest/DeleteCaseTask.java @@ -49,6 +49,7 @@ import org.sleuthkit.autopsy.progress.ProgressIndicator; import org.sleuthkit.autopsy.coreutils.Logger; import org.sleuthkit.autopsy.experimental.autoingest.AutoIngestJobNodeData.InvalidDataException; +import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions; import org.sleuthkit.datamodel.DataSource; import org.sleuthkit.datamodel.Image; import org.sleuthkit.datamodel.TskCoreException; @@ -58,68 +59,65 @@ * directed to the dedicated auto ingest dashboard log instead of to the general * application log. */ -final class DeleteCaseTask implements Runnable { +public final class DeleteCaseTask implements Runnable { - private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 5; + private static final int MANIFEST_FILE_LOCKING_TIMEOUT_MINS = 1; private static final int MANIFEST_DELETE_TRIES = 3; private static final Logger logger = AutoIngestDashboardLogger.getLogger(); private final CaseNodeData caseNodeData; private final DeleteOptions deleteOption; private final ProgressIndicator progress; - private final List<ManifestFileLock> manifestFileLocks; + private final boolean bestEffortDeletion; + private final boolean deleteDataSourceDirectories; + private final List<ManifestFileLock> manifestFileLocks = new ArrayList<>(); private CoordinationService coordinationService; private CaseMetadata caseMetadata; /** - * Options to support implementing different case deletion use cases. + * Constructs a task that deletes part or all of a given case.Note that all + * logging is directed to the dedicated auto ingest dashboard log instead of + * to the general application log. + * + * @param caseNodeData The case directory coordination service node data for + * the case. + * @param deleteOption The deletion option for the task. + * @param progress A progress indicator. + * @param bestEffortDeletion A flag whether manifest/input deletion is "best + * effort". 
If the flag is set to true, then we will delete all manifest + * files for which we are able to get exclusive lock, and leave behind all + * manifest files and data sources for which we were unable to get exclusive + * lock. If the flag is set to false, then the algorithm will abort and exit + * unless it is able to get exclusive lock on all of the manifest files. + * @param deleteDataSourceDirectories A flag whether to delete the directory + * containing each data source. If the flag is false, only the data source + * file will be deleted. If the flag is true, The entire directory + * containing the data source will be deleted. */ - enum DeleteOptions { - /** - * Delete the auto ingest job manifests and corresponding data sources, - * while leaving the manifest file coordination service nodes and the - * rest of the case intact. The use case is freeing auto ingest input - * directory space while retaining the option to restore the data - * sources, effectively restoring the case. - */ - DELETE_INPUT, - /** - * Delete the manifest file coordination service nodes and the output - * for a case, while leaving the auto ingest job manifests and - * corresponding data sources intact. The use case is auto ingest - * reprocessing of a case with a clean slate without having to restore - * the manifests and data sources. - */ - DELETE_OUTPUT, - /** - * Delete everything. - */ - DELETE_INPUT_AND_OUTPUT, - /** - * Delete only the case components that the application created. This is - * DELETE_OUTPUT with the additional feature that manifest file - * coordination service nodes are marked as deleted, rather than - * actually deleted. This eliminates the requirement that manifests and - * data sources have to be deleted before deleting the case to avoid an - * unwanted, automatic reprocessing of the case. - */ - DELETE_CASE + public DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress, + boolean bestEffortDeletion, boolean deleteDataSourceDirectories) { + this.caseNodeData = caseNodeData; + this.deleteOption = deleteOption; + this.progress = progress; + this.bestEffortDeletion = bestEffortDeletion; + this.deleteDataSourceDirectories = deleteDataSourceDirectories; } - + /** - * Constructs a task that deletes part or all of a given case. Note that all + * Constructs a task that deletes part or all of a given case.Note that all * logging is directed to the dedicated auto ingest dashboard log instead of * to the general application log. * * @param caseNodeData The case directory coordination service node data for - * the case. + * the case. * @param deleteOption The deletion option for the task. - * @param progress A progress indicator. + * @param progress A progress indicator. */ - DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress) { + public DeleteCaseTask(CaseNodeData caseNodeData, DeleteOptions deleteOption, ProgressIndicator progress) { this.caseNodeData = caseNodeData; this.deleteOption = deleteOption; this.progress = progress; - manifestFileLocks = new ArrayList<>(); + this.bestEffortDeletion = false; //abort and exit unless we get exclusive lock on all of the manifest files. 
+ this.deleteDataSourceDirectories = false; // only data source files will be deleted } @Override @@ -177,7 +175,7 @@ public void run() { }) private void deleteCase() throws CoordinationServiceException, IOException, InterruptedException { progress.progress(Bundle.DeleteCaseTask_progress_connectingToCoordSvc()); - logger.log(Level.INFO, String.format("Connecting to the coordination service for deletion of %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Connecting to the coordination service for deletion of %s", caseNodeData.getDisplayName())); coordinationService = CoordinationService.getInstance(); checkForCancellation(); @@ -212,7 +210,7 @@ private void deleteCase() throws CoordinationServiceException, IOException, Inte * case while it is being deleted. */ progress.progress(Bundle.DeleteCaseTask_progress_acquiringCaseDirLock()); - logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Acquiring an exclusive case directory lock for %s", caseNodeData.getDisplayName())); String caseDirLockName = CoordinationServiceUtils.getCaseDirectoryNodePath(caseNodeData.getDirectory()); try (CoordinationService.Lock caseDirLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.CASES, caseDirLockName)) { if (caseDirLock == null) { @@ -375,7 +373,7 @@ private boolean acquireManifestFileLocks() throws IOException, CoordinationServi logger.log(Level.INFO, String.format("Found %d manifest file path(s) for %s", manifestFilePaths.size(), caseNodeData.getDisplayName())); if (!manifestFilePaths.isEmpty()) { progress.progress(Bundle.DeleteCaseTask_progress_acquiringManifestLocks()); - logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Acquiring exclusive manifest file locks for %s", caseNodeData.getDisplayName())); /* * When acquiring the locks, it is reasonable to block briefly, * since the auto ingest node (AIN) input directory scanning tasks @@ -390,15 +388,17 @@ private boolean acquireManifestFileLocks() throws IOException, CoordinationServi for (Path manifestPath : manifestFilePaths) { checkForCancellation(); progress.progress(Bundle.DeleteCaseTask_progress_lockingManifest(manifestPath.toString())); - logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Exclusively locking the manifest %s for %s", manifestPath, caseNodeData.getDisplayName())); CoordinationService.Lock manifestLock = coordinationService.tryGetExclusiveLock(CoordinationService.CategoryNode.MANIFESTS, manifestPath.toString(), MANIFEST_FILE_LOCKING_TIMEOUT_MINS, TimeUnit.MINUTES); if (null != manifestLock) { manifestFileLocks.add(new ManifestFileLock(manifestPath, manifestLock)); } else { - logger.log(Level.INFO, String.format("Failed to exclusively lock the manifest %s because it was already held by another host", manifestPath, caseNodeData.getDisplayName())); - allLocksAcquired = false; - releaseManifestFileLocks(); - break; + logger.log(Level.WARNING, String.format("Failed to exclusively lock the manifest %s because it was already held by another host", manifestPath, caseNodeData.getDisplayName())); + if (!bestEffortDeletion) { + allLocksAcquired = false; + releaseManifestFileLocks(); + break; + } } } } catch (CoordinationServiceException | 
InterruptedException ex) { @@ -422,7 +422,7 @@ private void deleteCaseContents() throws InterruptedException { final File caseDirectory = caseNodeData.getDirectory().toFile(); if (caseDirectory.exists()) { progress.progress(Bundle.DeleteCaseTask_progress_openingCaseMetadataFile()); - logger.log(Level.INFO, String.format("Opening case metadata file for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Opening case metadata file for %s", caseNodeData.getDisplayName())); Path caseMetadataPath = CaseMetadata.getCaseMetadataFilePath(caseNodeData.getDirectory()); if (caseMetadataPath != null) { try { @@ -470,7 +470,7 @@ private void deleteAutoIngestInput() throws InterruptedException { SleuthkitCase caseDb = null; try { progress.progress(Bundle.DeleteCaseTask_progress_openingCaseDatabase()); - logger.log(Level.INFO, String.format("Opening the case database for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Opening the case database for %s", caseNodeData.getDisplayName())); caseDb = SleuthkitCase.openCase(caseMetadata.getCaseDatabaseName(), UserPreferences.getDatabaseConnectionInfo(), caseMetadata.getCaseDirectory()); List<DataSource> dataSources = caseDb.getDataSources(); checkForCancellation(); @@ -493,6 +493,13 @@ private void deleteAutoIngestInput() throws InterruptedException { } else { allInputDeleted = false; } + + if (deleteDataSourceDirectories) { + File parentDir = manifestFile.getParentFile(); + if (parentDir.exists() && !FileUtil.deleteFileDir(parentDir)) { + logger.log(Level.WARNING, String.format("Failed to delete data source directory %s for %s", parentDir.toString(), caseNodeData.getDisplayName())); + } + } } else { logger.log(Level.WARNING, String.format("Failed to parse manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName())); allInputDeleted = false; @@ -563,7 +570,7 @@ private boolean deleteManifestFile(File manifestFile) throws InterruptedExceptio */ Path manifestFilePath = manifestFile.toPath(); progress.progress(Bundle.DeleteCaseTask_progress_deletingManifest(manifestFilePath)); - logger.log(Level.INFO, String.format("Deleting manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName())); int tries = 0; boolean deleted = false; while (!deleted && tries < MANIFEST_DELETE_TRIES) { @@ -697,7 +704,7 @@ private void markManifestFileNodesAsDeleted() throws InterruptedException { private void deleteCaseResourcesNode() throws InterruptedException { if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) { progress.progress(Bundle.DeleteCaseTask_progress_deletingResourcesLockNode()); - logger.log(Level.INFO, String.format("Deleting case resources log znode for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting case resources log znode for %s", caseNodeData.getDisplayName())); String resourcesNodePath = CoordinationServiceUtils.getCaseResourcesNodePath(caseNodeData.getDirectory()); try { coordinationService.deleteNode(CategoryNode.CASES, resourcesNodePath); @@ -719,7 +726,7 @@ private void deleteCaseResourcesNode() throws InterruptedException { private void deleteCaseAutoIngestLogNode() throws InterruptedException { if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || 
deleteOption == DeleteOptions.DELETE_CASE) { progress.progress(Bundle.DeleteCaseTask_progress_deletingJobLogLockNode()); - logger.log(Level.INFO, String.format("Deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting case auto ingest job log znode for %s", caseNodeData.getDisplayName())); String logFilePath = CoordinationServiceUtils.getCaseAutoIngestLogNodePath(caseNodeData.getDirectory()); try { coordinationService.deleteNode(CategoryNode.CASES, logFilePath); @@ -753,7 +760,7 @@ private void deleteCaseDirectoryNode() throws InterruptedException { && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.CASE_DIR) && caseNodeData.isDeletedFlagSet(CaseNodeData.DeletedFlags.MANIFEST_FILE_NODES))) { progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseDirCoordSvcNode()); - logger.log(Level.INFO, String.format("Deleting case directory znode for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting case directory znode for %s", caseNodeData.getDisplayName())); String caseDirNodePath = CoordinationServiceUtils.getCaseDirectoryNodePath(caseNodeData.getDirectory()); try { coordinationService.deleteNode(CategoryNode.CASES, caseDirNodePath); @@ -773,7 +780,7 @@ private void deleteCaseDirectoryNode() throws InterruptedException { private void deleteCaseNameNode() throws InterruptedException { if (deleteOption == DeleteOptions.DELETE_OUTPUT || deleteOption == DeleteOptions.DELETE_INPUT_AND_OUTPUT || deleteOption == DeleteOptions.DELETE_CASE) { progress.progress(Bundle.DeleteCaseTask_progress_deletingCaseNameCoordSvcNode()); - logger.log(Level.INFO, String.format("Deleting case name znode for %s", caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting case name znode for %s", caseNodeData.getDisplayName())); try { String caseNameLockNodeName = CoordinationServiceUtils.getCaseNameNodePath(caseNodeData.getDirectory()); coordinationService.deleteNode(CategoryNode.CASES, caseNameLockNodeName); @@ -827,11 +834,11 @@ private void deleteManifestFileNodes() throws InterruptedException { String manifestFilePath = manifestFileLock.getManifestFilePath().toString(); try { progress.progress(Bundle.DeleteCaseTask_progress_releasingManifestLock(manifestFilePath)); - logger.log(Level.INFO, String.format("Releasing the lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Releasing the lock on the manifest file %s for %s", manifestFilePath, caseNodeData.getDisplayName())); manifestFileLock.release(); if (manifestFileLock.isInputDeleted()) { progress.progress(Bundle.DeleteCaseTask_progress_deletingManifestFileNode(manifestFilePath)); - logger.log(Level.INFO, String.format("Deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName())); + //logger.log(Level.INFO, String.format("Deleting the manifest file znode for %s for %s", manifestFilePath, caseNodeData.getDisplayName())); coordinationService.deleteNode(CoordinationService.CategoryNode.MANIFESTS, manifestFilePath); } else { allINodesDeleted = false; diff --git a/Experimental/src/org/sleuthkit/autopsy/experimental/cleanup/AutoIngestCleanup.java b/Experimental/src/org/sleuthkit/autopsy/experimental/cleanup/AutoIngestCleanup.java new file mode 100755 index 0000000000000000000000000000000000000000..e6c734175ed8ed1405040a56a5e9eb04ee3c467d --- /dev/null +++ 
b/Experimental/src/org/sleuthkit/autopsy/experimental/cleanup/AutoIngestCleanup.java @@ -0,0 +1,75 @@ +/* + * Autopsy Forensic Browser + * + * Copyright 2022 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.experimental.cleanup; + +import java.nio.file.Path; +import org.sleuthkit.autopsy.progress.ProgressIndicator; + +/** + * Interface to perform automated cleanup of auto ingest input and output directories, + * as well as ZK nodes. + */ +public interface AutoIngestCleanup { + + /** + * Options to support implementing different case deletion use cases. + */ + public enum DeleteOptions { + /** + * Delete the auto ingest job manifests and corresponding data sources, + * while leaving the manifest file coordination service nodes and the + * rest of the case intact. The use case is freeing auto ingest input + * directory space while retaining the option to restore the data + * sources, effectively restoring the case. + */ + DELETE_INPUT, + /** + * Delete the manifest file coordination service nodes and the output + * for a case, while leaving the auto ingest job manifests and + * corresponding data sources intact. The use case is auto ingest + * reprocessing of a case with a clean slate without having to restore + * the manifests and data sources. + */ + DELETE_OUTPUT, + /** + * Delete everything. + */ + DELETE_INPUT_AND_OUTPUT, + /** + * Delete only the case components that the application created. This is + * DELETE_OUTPUT with the additional feature that manifest file + * coordination service nodes are marked as deleted, rather than + * actually deleted. This eliminates the requirement that manifests and + * data sources have to be deleted before deleting the case to avoid an + * unwanted, automatic reprocessing of the case. + */ + DELETE_CASE + } + + /** + * Performs auto ingest cleanup. For example, deletes input data sources, + * output directory, manifest file, and ZK nodes. + * + * @param caseOutputDirectoryPath Path to case output directory. + * @param deleteOption Cleanup options. + * @param progress Progress indicator. + * @return True if cleanup completed successfully, false otherwise. + */ + boolean runCleanupTask(Path caseOutputDirectoryPath, DeleteOptions deleteOption, ProgressIndicator progress); +}
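
Note on how the new extension point is consumed (not part of the patch): AutoIngestManager.cleanup() discovers cleanup implementations through the NetBeans Lookup, so an implementation is only picked up by the auto ingest node if it is registered as a service provider for AutoIngestCleanup. A minimal illustrative sketch follows; the class name ExampleAutoIngestCleanup and its body are hypothetical.

    package org.sleuthkit.autopsy.experimental.cleanup;

    import java.nio.file.Path;
    import org.openide.util.lookup.ServiceProvider;
    import org.sleuthkit.autopsy.progress.ProgressIndicator;

    // Hypothetical example: registering an implementation so that
    // Lookup.getDefault().lookupAll(AutoIngestCleanup.class) in AutoIngestManager.cleanup() finds it.
    @ServiceProvider(service = AutoIngestCleanup.class)
    public class ExampleAutoIngestCleanup implements AutoIngestCleanup {

        @Override
        public boolean runCleanupTask(Path caseOutputDirectoryPath, DeleteOptions deleteOption, ProgressIndicator progress) {
            progress.start("Cleaning up " + caseOutputDirectoryPath);
            try {
                // Delete the case input/output and coordination service nodes here,
                // e.g. by delegating to DeleteCaseTask (see the next sketch).
                return true;
            } finally {
                progress.finish();
            }
        }
    }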
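
The reworked DeleteCaseTask constructors are the other half of the change: the new public five-argument constructor exposes the bestEffortDeletion and deleteDataSourceDirectories flags, while the retained three-argument constructor keeps the old semantics (abort unless every manifest can be exclusively locked; delete only the data source files). A minimal usage sketch, assuming the caller already has the case's CaseNodeData and a ProgressIndicator in hand; the helper class and method names below are hypothetical.

    import org.sleuthkit.autopsy.casemodule.multiusercases.CaseNodeData;
    import org.sleuthkit.autopsy.experimental.autoingest.DeleteCaseTask;
    import org.sleuthkit.autopsy.experimental.cleanup.AutoIngestCleanup.DeleteOptions;
    import org.sleuthkit.autopsy.progress.ProgressIndicator;

    public final class DeleteCaseExample {

        // Deletes both the auto ingest input (manifests and data sources) and the case output,
        // skipping any manifest that cannot be exclusively locked rather than aborting,
        // and removing each data source's parent directory rather than just the file.
        static void deleteCaseBestEffort(CaseNodeData caseNodeData, ProgressIndicator progress) {
            DeleteCaseTask task = new DeleteCaseTask(
                    caseNodeData,
                    DeleteOptions.DELETE_INPUT_AND_OUTPUT,
                    progress,
                    true,   // bestEffortDeletion
                    true);  // deleteDataSourceDirectories
            task.run(); // DeleteCaseTask implements Runnable, so it can also be submitted to an executor
        }
    }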
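
A small readability note on the grouping step in cleanup(): the get/null-check/put sequence that builds casesToJobsMap is equivalent to Map.computeIfAbsent. The snippet below is just an alternative formulation of the same loop, using the same fields (completedJobs, jobsLock) referenced in the patch; it is not a required change.

    Map<Path, List<Path>> casesToJobsMap = new HashMap<>();
    synchronized (jobsLock) {
        for (AutoIngestJob job : completedJobs) {
            // group data source paths by case directory, creating each list on first use
            casesToJobsMap
                    .computeIfAbsent(job.getCaseDirectoryPath(), k -> new ArrayList<>())
                    .add(job.getManifest().getDataSourcePath());
        }
    }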