diff --git a/Core/manifest.mf b/Core/manifest.mf index 31bfec73dea8aaed44fe9231734a0b3f8bd318e5..7aa34c46dc4817251b3903ae1dca94496d70fe05 100644 --- a/Core/manifest.mf +++ b/Core/manifest.mf @@ -1,10 +1,10 @@ -Manifest-Version: 1.0 -OpenIDE-Module: org.sleuthkit.autopsy.core/9 -OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/core/Bundle.properties -OpenIDE-Module-Layer: org/sleuthkit/autopsy/core/layer.xml -OpenIDE-Module-Implementation-Version: 9 -OpenIDE-Module-Requires: org.openide.windows.WindowManager, org.netbeans.api.javahelp.Help -AutoUpdate-Show-In-Client: true -AutoUpdate-Essential-Module: true -OpenIDE-Module-Install: org/sleuthkit/autopsy/core/Installer.class - +Manifest-Version: 1.0 +OpenIDE-Module: org.sleuthkit.autopsy.core/9 +OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/core/Bundle.properties +OpenIDE-Module-Layer: org/sleuthkit/autopsy/core/layer.xml +OpenIDE-Module-Implementation-Version: 9 +OpenIDE-Module-Requires: org.openide.windows.WindowManager, org.netbeans.api.javahelp.Help +AutoUpdate-Show-In-Client: true +AutoUpdate-Essential-Module: true +OpenIDE-Module-Install: org/sleuthkit/autopsy/core/Installer.class + diff --git a/Core/src/org/sleuthkit/autopsy/datamodel/DirectoryNode.java b/Core/src/org/sleuthkit/autopsy/datamodel/DirectoryNode.java index 3c4d33c253d3a9993e4a52309fcf58413676a361..f859362757f7d654aec5814d26accfd00bb26ed9 100755 --- a/Core/src/org/sleuthkit/autopsy/datamodel/DirectoryNode.java +++ b/Core/src/org/sleuthkit/autopsy/datamodel/DirectoryNode.java @@ -1,97 +1,97 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2011 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.sleuthkit.autopsy.datamodel; - -import java.util.ArrayList; -import java.util.List; -import javax.swing.Action; -import org.sleuthkit.autopsy.directorytree.ExtractAction; -import org.sleuthkit.autopsy.directorytree.NewWindowViewAction; -import org.sleuthkit.autopsy.directorytree.TagAbstractFileAction; -import org.sleuthkit.autopsy.directorytree.ViewContextAction; -import org.sleuthkit.datamodel.AbstractFile; -import org.sleuthkit.datamodel.Directory; -import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM; - -/** - * This class is used to represent the "Node" for the directory. Its children - * are more directories. 
- */ -public class DirectoryNode extends AbstractFsContentNode<AbstractFile> { - - public static final String DOTDOTDIR = "[parent folder]"; - public static final String DOTDIR = "[current folder]"; - - public DirectoryNode(Directory dir) { - this(dir, true); - - setIcon(dir); - } - - public DirectoryNode(AbstractFile dir, boolean directoryBrowseMode) { - super(dir, directoryBrowseMode); - - setIcon(dir); - } - - private void setIcon(AbstractFile dir) { - // set name, display name, and icon - if (dir.isDirNameFlagSet(TSK_FS_NAME_FLAG_ENUM.UNALLOC)) { - this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/folder-icon-deleted.png"); - } else { - this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/Folder-icon.png"); - } - } - - /** - * Right click action for this node - * - * @param popup - * @return - */ - @Override - public Action[] getActions(boolean popup) { - List<Action> actions = new ArrayList<>(); - if (!getDirectoryBrowseMode()) { - actions.add(new ViewContextAction("View File in Directory", this)); - actions.add(null); // creates a menu separator - } - actions.add(new NewWindowViewAction("View in New Window", this)); - actions.add(null); // creates a menu separator - actions.add(ExtractAction.getInstance()); - actions.add(null); // creates a menu separator - actions.add(TagAbstractFileAction.getInstance()); - return actions.toArray(new Action[0]); - } - - @Override - public <T> T accept(ContentNodeVisitor<T> v) { - return v.visit(this); - } - - @Override - public <T> T accept(DisplayableItemNodeVisitor<T> v) { - return v.visit(this); - } - - @Override - public TYPE getDisplayableItemNodeType() { - return TYPE.CONTENT; - } -} +/* + * Autopsy Forensic Browser + * + * Copyright 2011 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.datamodel; + +import java.util.ArrayList; +import java.util.List; +import javax.swing.Action; +import org.sleuthkit.autopsy.directorytree.ExtractAction; +import org.sleuthkit.autopsy.directorytree.NewWindowViewAction; +import org.sleuthkit.autopsy.directorytree.TagAbstractFileAction; +import org.sleuthkit.autopsy.directorytree.ViewContextAction; +import org.sleuthkit.datamodel.AbstractFile; +import org.sleuthkit.datamodel.Directory; +import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM; + +/** + * This class is used to represent the "Node" for the directory. Its children + * are more directories. 
+ */ +public class DirectoryNode extends AbstractFsContentNode<AbstractFile> { + + public static final String DOTDOTDIR = "[parent folder]"; + public static final String DOTDIR = "[current folder]"; + + public DirectoryNode(Directory dir) { + this(dir, true); + + setIcon(dir); + } + + public DirectoryNode(AbstractFile dir, boolean directoryBrowseMode) { + super(dir, directoryBrowseMode); + + setIcon(dir); + } + + private void setIcon(AbstractFile dir) { + // set name, display name, and icon + if (dir.isDirNameFlagSet(TSK_FS_NAME_FLAG_ENUM.UNALLOC)) { + this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/folder-icon-deleted.png"); + } else { + this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/Folder-icon.png"); + } + } + + /** + * Right click action for this node + * + * @param popup + * @return + */ + @Override + public Action[] getActions(boolean popup) { + List<Action> actions = new ArrayList<>(); + if (!getDirectoryBrowseMode()) { + actions.add(new ViewContextAction("View File in Directory", this)); + actions.add(null); // creates a menu separator + } + actions.add(new NewWindowViewAction("View in New Window", this)); + actions.add(null); // creates a menu separator + actions.add(ExtractAction.getInstance()); + actions.add(null); // creates a menu separator + actions.add(TagAbstractFileAction.getInstance()); + return actions.toArray(new Action[0]); + } + + @Override + public <T> T accept(ContentNodeVisitor<T> v) { + return v.visit(this); + } + + @Override + public <T> T accept(DisplayableItemNodeVisitor<T> v) { + return v.visit(this); + } + + @Override + public TYPE getDisplayableItemNodeType() { + return TYPE.CONTENT; + } +} diff --git a/Core/src/org/sleuthkit/autopsy/datamodel/FileNode.java b/Core/src/org/sleuthkit/autopsy/datamodel/FileNode.java index 9ca87fd8a7dd33a491b93cf4b5ef219d17960892..628cd145a8b8fbe80be569f373bf6e4f959a728b 100755 --- a/Core/src/org/sleuthkit/autopsy/datamodel/FileNode.java +++ b/Core/src/org/sleuthkit/autopsy/datamodel/FileNode.java @@ -1,180 +1,180 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2011 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.sleuthkit.autopsy.datamodel; - -import java.util.ArrayList; -import java.util.List; -import javax.swing.Action; -import org.sleuthkit.autopsy.directorytree.ExternalViewerAction; -import org.sleuthkit.autopsy.directorytree.ExtractAction; -import org.sleuthkit.autopsy.directorytree.HashSearchAction; -import org.sleuthkit.autopsy.directorytree.NewWindowViewAction; -import org.sleuthkit.autopsy.directorytree.TagAbstractFileAction; -import org.sleuthkit.autopsy.directorytree.ViewContextAction; -import org.sleuthkit.datamodel.AbstractFile; -import org.sleuthkit.datamodel.TskData.TSK_DB_FILES_TYPE_ENUM; -import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM; - -/** - * This class is used to represent the "Node" for the file. It may have derived - * files children. 
- */ -public class FileNode extends AbstractFsContentNode<AbstractFile> { - - /** - * @param file underlying Content - */ - public FileNode(AbstractFile file) { - this(file, true); - - setIcon(file); - } - - public FileNode(AbstractFile file, boolean directoryBrowseMode) { - super(file, directoryBrowseMode); - - setIcon(file); - } - - private void setIcon(AbstractFile file) { - // set name, display name, and icon - if (file.isDirNameFlagSet(TSK_FS_NAME_FLAG_ENUM.UNALLOC)) { - if (file.getType().equals(TSK_DB_FILES_TYPE_ENUM.CARVED)) { - this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/carved-file-icon-16.png"); - } else { - this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/file-icon-deleted.png"); - } - } else { - this.setIconBaseWithExtension(getIconForFileType(file)); - } - } - - /** - * Right click action for this node - * - * @param popup - * @return - */ - @Override - public Action[] getActions(boolean popup) { - List<Action> actionsList = new ArrayList<>(); - if (!this.getDirectoryBrowseMode()) { - actionsList.add(new ViewContextAction("View File in Directory", this)); - actionsList.add(null); // creates a menu separator - } - actionsList.add(new NewWindowViewAction("View in New Window", this)); - actionsList.add(new ExternalViewerAction("Open in External Viewer", this)); - actionsList.add(null); // creates a menu separator - actionsList.add(ExtractAction.getInstance()); - actionsList.add(new HashSearchAction("Search for files with the same MD5 hash", this)); - actionsList.add(null); // creates a menu separator - actionsList.add(TagAbstractFileAction.getInstance()); - return actionsList.toArray(new Action[0]); - } - - @Override - public <T> T accept(ContentNodeVisitor< T> v) { - return v.visit(this); - } - - @Override - public <T> T accept(DisplayableItemNodeVisitor< T> v) { - return v.visit(this); - } - - // Given a file, returns the correct icon for said - // file based off it's extension - static String getIconForFileType(AbstractFile file) { - // Get the name, extension - String name = file.getName(); - int dotIndex = name.lastIndexOf("."); - if (dotIndex == -1) { - return "org/sleuthkit/autopsy/images/file-icon.png"; - } - String ext = name.substring(dotIndex).toLowerCase(); - - // Images - for (String s : FileTypeExtensions.getImageExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/image-file.png"; - } - } - // Videos - for (String s : FileTypeExtensions.getVideoExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/video-file.png"; - } - } - // Audio Files - for (String s : FileTypeExtensions.getAudioExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/audio-file.png"; - } - } - // Documents - for (String s : FileTypeExtensions.getDocumentExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/doc-file.png"; - } - } - // Executables / System Files - for (String s : FileTypeExtensions.getExecutableExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/exe-file.png"; - } - } - // Text Files - for (String s : FileTypeExtensions.getTextExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/text-file.png"; - } - } - // Web Files - for (String s : FileTypeExtensions.getWebExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/web-file.png"; - } - } - // PDFs - for (String s : FileTypeExtensions.getPDFExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/pdf-file.png"; - } - } 
- // Archives - for (String s : FileTypeExtensions.getArchiveExtensions()) { - if (ext.equals(s)) { - return "org/sleuthkit/autopsy/images/archive-file.png"; - } - } - // Else return the default - return "org/sleuthkit/autopsy/images/file-icon.png"; - - } - - @Override - public TYPE getDisplayableItemNodeType() { - return TYPE.CONTENT; - } - - @Override - public boolean isLeafTypeNode() { - return true; //false; - } -} +/* + * Autopsy Forensic Browser + * + * Copyright 2011 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.datamodel; + +import java.util.ArrayList; +import java.util.List; +import javax.swing.Action; +import org.sleuthkit.autopsy.directorytree.ExternalViewerAction; +import org.sleuthkit.autopsy.directorytree.ExtractAction; +import org.sleuthkit.autopsy.directorytree.HashSearchAction; +import org.sleuthkit.autopsy.directorytree.NewWindowViewAction; +import org.sleuthkit.autopsy.directorytree.TagAbstractFileAction; +import org.sleuthkit.autopsy.directorytree.ViewContextAction; +import org.sleuthkit.datamodel.AbstractFile; +import org.sleuthkit.datamodel.TskData.TSK_DB_FILES_TYPE_ENUM; +import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM; + +/** + * This class is used to represent the "Node" for the file. It may have derived + * files children. 
+ */ +public class FileNode extends AbstractFsContentNode<AbstractFile> { + + /** + * @param file underlying Content + */ + public FileNode(AbstractFile file) { + this(file, true); + + setIcon(file); + } + + public FileNode(AbstractFile file, boolean directoryBrowseMode) { + super(file, directoryBrowseMode); + + setIcon(file); + } + + private void setIcon(AbstractFile file) { + // set name, display name, and icon + if (file.isDirNameFlagSet(TSK_FS_NAME_FLAG_ENUM.UNALLOC)) { + if (file.getType().equals(TSK_DB_FILES_TYPE_ENUM.CARVED)) { + this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/carved-file-icon-16.png"); + } else { + this.setIconBaseWithExtension("org/sleuthkit/autopsy/images/file-icon-deleted.png"); + } + } else { + this.setIconBaseWithExtension(getIconForFileType(file)); + } + } + + /** + * Right click action for this node + * + * @param popup + * @return + */ + @Override + public Action[] getActions(boolean popup) { + List<Action> actionsList = new ArrayList<>(); + if (!this.getDirectoryBrowseMode()) { + actionsList.add(new ViewContextAction("View File in Directory", this)); + actionsList.add(null); // creates a menu separator + } + actionsList.add(new NewWindowViewAction("View in New Window", this)); + actionsList.add(new ExternalViewerAction("Open in External Viewer", this)); + actionsList.add(null); // creates a menu separator + actionsList.add(ExtractAction.getInstance()); + actionsList.add(new HashSearchAction("Search for files with the same MD5 hash", this)); + actionsList.add(null); // creates a menu separator + actionsList.add(TagAbstractFileAction.getInstance()); + return actionsList.toArray(new Action[0]); + } + + @Override + public <T> T accept(ContentNodeVisitor< T> v) { + return v.visit(this); + } + + @Override + public <T> T accept(DisplayableItemNodeVisitor< T> v) { + return v.visit(this); + } + + // Given a file, returns the correct icon for said + // file based off it's extension + static String getIconForFileType(AbstractFile file) { + // Get the name, extension + String name = file.getName(); + int dotIndex = name.lastIndexOf("."); + if (dotIndex == -1) { + return "org/sleuthkit/autopsy/images/file-icon.png"; + } + String ext = name.substring(dotIndex).toLowerCase(); + + // Images + for (String s : FileTypeExtensions.getImageExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/image-file.png"; + } + } + // Videos + for (String s : FileTypeExtensions.getVideoExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/video-file.png"; + } + } + // Audio Files + for (String s : FileTypeExtensions.getAudioExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/audio-file.png"; + } + } + // Documents + for (String s : FileTypeExtensions.getDocumentExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/doc-file.png"; + } + } + // Executables / System Files + for (String s : FileTypeExtensions.getExecutableExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/exe-file.png"; + } + } + // Text Files + for (String s : FileTypeExtensions.getTextExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/text-file.png"; + } + } + // Web Files + for (String s : FileTypeExtensions.getWebExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/web-file.png"; + } + } + // PDFs + for (String s : FileTypeExtensions.getPDFExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/pdf-file.png"; + } + } 
+ // Archives + for (String s : FileTypeExtensions.getArchiveExtensions()) { + if (ext.equals(s)) { + return "org/sleuthkit/autopsy/images/archive-file.png"; + } + } + // Else return the default + return "org/sleuthkit/autopsy/images/file-icon.png"; + + } + + @Override + public TYPE getDisplayableItemNodeType() { + return TYPE.CONTENT; + } + + @Override + public boolean isLeafTypeNode() { + return true; //false; + } +} diff --git a/Core/src/org/sleuthkit/autopsy/directorytree/TagAbstractFileAction.java b/Core/src/org/sleuthkit/autopsy/directorytree/TagAbstractFileAction.java index 657673ac74b37d75c75507d111f87851f6570231..4f64b8cfb7a5b5cd90da63a6ff8d954f2a1e6981 100755 --- a/Core/src/org/sleuthkit/autopsy/directorytree/TagAbstractFileAction.java +++ b/Core/src/org/sleuthkit/autopsy/directorytree/TagAbstractFileAction.java @@ -1,70 +1,70 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2013 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.sleuthkit.autopsy.directorytree; - -import java.awt.event.ActionEvent; -import java.util.Collection; -import javax.swing.AbstractAction; -import javax.swing.JMenuItem; -import org.openide.util.Utilities; -import org.openide.util.actions.Presenter; -import org.sleuthkit.autopsy.datamodel.Tags; -import org.sleuthkit.datamodel.AbstractFile; - -public class TagAbstractFileAction extends AbstractAction implements Presenter.Popup { - // This class is a singleton to support multi-selection of nodes, since - // org.openide.nodes.NodeOp.findActions(Node[] nodes) will only pick up an Action if every - // node in the array returns a reference to the same action object from Node.getActions(boolean). - private static TagAbstractFileAction instance; - - public static synchronized TagAbstractFileAction getInstance() { - if (null == instance) { - instance = new TagAbstractFileAction(); - } - return instance; - } - - private TagAbstractFileAction() { - } - - @Override - public JMenuItem getPopupPresenter() { - return new TagAbstractFileMenu(); - } - - @Override - public void actionPerformed(ActionEvent e) { - // Do nothing - this action should never be performed. - // Submenu actions are invoked instead. - } - - private static class TagAbstractFileMenu extends TagMenu { - public TagAbstractFileMenu() { - super(Utilities.actionsGlobalContext().lookupAll(AbstractFile.class).size() > 1 ? "Tag Files" : "Tag File"); - } - - @Override - protected void applyTag(String tagName, String comment) { - Collection<? extends AbstractFile> selectedFiles = Utilities.actionsGlobalContext().lookupAll(AbstractFile.class); - for (AbstractFile file : selectedFiles) { - Tags.createTag(file, tagName, comment); - } - } - } -} +/* + * Autopsy Forensic Browser + * + * Copyright 2013 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.directorytree; + +import java.awt.event.ActionEvent; +import java.util.Collection; +import javax.swing.AbstractAction; +import javax.swing.JMenuItem; +import org.openide.util.Utilities; +import org.openide.util.actions.Presenter; +import org.sleuthkit.autopsy.datamodel.Tags; +import org.sleuthkit.datamodel.AbstractFile; + +public class TagAbstractFileAction extends AbstractAction implements Presenter.Popup { + // This class is a singleton to support multi-selection of nodes, since + // org.openide.nodes.NodeOp.findActions(Node[] nodes) will only pick up an Action if every + // node in the array returns a reference to the same action object from Node.getActions(boolean). + private static TagAbstractFileAction instance; + + public static synchronized TagAbstractFileAction getInstance() { + if (null == instance) { + instance = new TagAbstractFileAction(); + } + return instance; + } + + private TagAbstractFileAction() { + } + + @Override + public JMenuItem getPopupPresenter() { + return new TagAbstractFileMenu(); + } + + @Override + public void actionPerformed(ActionEvent e) { + // Do nothing - this action should never be performed. + // Submenu actions are invoked instead. + } + + private static class TagAbstractFileMenu extends TagMenu { + public TagAbstractFileMenu() { + super(Utilities.actionsGlobalContext().lookupAll(AbstractFile.class).size() > 1 ? "Tag Files" : "Tag File"); + } + + @Override + protected void applyTag(String tagName, String comment) { + Collection<? extends AbstractFile> selectedFiles = Utilities.actionsGlobalContext().lookupAll(AbstractFile.class); + for (AbstractFile file : selectedFiles) { + Tags.createTag(file, tagName, comment); + } + } + } +} diff --git a/Core/src/org/sleuthkit/autopsy/directorytree/TagBlackboardArtifactAction.java b/Core/src/org/sleuthkit/autopsy/directorytree/TagBlackboardArtifactAction.java index 3d1a9641b321d530dcd54f19812d15f67b3ae91f..6d2a056a379cbe812cf9587f9654781920cf6b0a 100755 --- a/Core/src/org/sleuthkit/autopsy/directorytree/TagBlackboardArtifactAction.java +++ b/Core/src/org/sleuthkit/autopsy/directorytree/TagBlackboardArtifactAction.java @@ -1,71 +1,71 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2013 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.sleuthkit.autopsy.directorytree; - -import java.awt.event.ActionEvent; -import java.util.Collection; -import javax.swing.AbstractAction; -import javax.swing.JMenuItem; -import org.openide.util.Utilities; -import org.openide.util.actions.Presenter; -import org.sleuthkit.autopsy.datamodel.Tags; -import org.sleuthkit.datamodel.BlackboardArtifact; - -public class TagBlackboardArtifactAction extends AbstractAction implements Presenter.Popup { - // This class is a singleton to support multi-selection of nodes, since - // org.openide.nodes.NodeOp.findActions(Node[] nodes) will only pick up an Action if every - // node in the array returns a reference to the same action object from Node.getActions(boolean). - private static TagBlackboardArtifactAction instance; - - public static synchronized TagBlackboardArtifactAction getInstance() { - if (null == instance) { - instance = new TagBlackboardArtifactAction(); - } - return instance; - } - - private TagBlackboardArtifactAction() { - } - - @Override - public JMenuItem getPopupPresenter() { - return new TagBlackboardArtifactMenu(); - } - - @Override - public void actionPerformed(ActionEvent e) { - // Do nothing - this action should never be performed. - // Submenu actions are invoked instead. - } - - - private static class TagBlackboardArtifactMenu extends TagMenu { - public TagBlackboardArtifactMenu() { - super(Utilities.actionsGlobalContext().lookupAll(BlackboardArtifact.class).size() > 1 ? "Tag Results" : "Tag Result"); - } - - @Override - protected void applyTag(String tagName, String comment) { - Collection<? extends BlackboardArtifact> selectedArtifacts = Utilities.actionsGlobalContext().lookupAll(BlackboardArtifact.class); - for (BlackboardArtifact artifact : selectedArtifacts) { - Tags.createTag(artifact, tagName, comment); - } - } - } -} +/* + * Autopsy Forensic Browser + * + * Copyright 2013 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.directorytree; + +import java.awt.event.ActionEvent; +import java.util.Collection; +import javax.swing.AbstractAction; +import javax.swing.JMenuItem; +import org.openide.util.Utilities; +import org.openide.util.actions.Presenter; +import org.sleuthkit.autopsy.datamodel.Tags; +import org.sleuthkit.datamodel.BlackboardArtifact; + +public class TagBlackboardArtifactAction extends AbstractAction implements Presenter.Popup { + // This class is a singleton to support multi-selection of nodes, since + // org.openide.nodes.NodeOp.findActions(Node[] nodes) will only pick up an Action if every + // node in the array returns a reference to the same action object from Node.getActions(boolean). 
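/*
 * A minimal, illustrative sketch (not part of this patch) of the
 * shared-instance pattern the comment above describes; the class name is
 * invented. The key points are that every node hands back the *same* Action
 * object, so org.openide.nodes.NodeOp.findActions() keeps the item available
 * for multi-selection, and that the action resolves the selection from the
 * global lookup at invocation time rather than capturing a node at
 * construction time, which is what makes one instance safe to share.
 */
import java.awt.event.ActionEvent;
import javax.swing.AbstractAction;
import org.openide.util.Utilities;
import org.sleuthkit.datamodel.AbstractFile;

public class SharedSelectionAction extends AbstractAction {
    private static SharedSelectionAction instance;

    public static synchronized SharedSelectionAction getInstance() {
        if (instance == null) {
            instance = new SharedSelectionAction();
        }
        return instance;
    }

    private SharedSelectionAction() {
        super("Act on Selection");
    }

    @Override
    public void actionPerformed(ActionEvent e) {
        // Resolve the current selection at invocation time, not construction time.
        for (AbstractFile file : Utilities.actionsGlobalContext().lookupAll(AbstractFile.class)) {
            System.out.println("selected: " + file.getName());
        }
    }
}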
+ private static TagBlackboardArtifactAction instance; + + public static synchronized TagBlackboardArtifactAction getInstance() { + if (null == instance) { + instance = new TagBlackboardArtifactAction(); + } + return instance; + } + + private TagBlackboardArtifactAction() { + } + + @Override + public JMenuItem getPopupPresenter() { + return new TagBlackboardArtifactMenu(); + } + + @Override + public void actionPerformed(ActionEvent e) { + // Do nothing - this action should never be performed. + // Submenu actions are invoked instead. + } + + + private static class TagBlackboardArtifactMenu extends TagMenu { + public TagBlackboardArtifactMenu() { + super(Utilities.actionsGlobalContext().lookupAll(BlackboardArtifact.class).size() > 1 ? "Tag Results" : "Tag Result"); + } + + @Override + protected void applyTag(String tagName, String comment) { + Collection<? extends BlackboardArtifact> selectedArtifacts = Utilities.actionsGlobalContext().lookupAll(BlackboardArtifact.class); + for (BlackboardArtifact artifact : selectedArtifacts) { + Tags.createTag(artifact, tagName, comment); + } + } + } +} diff --git a/ExifParser/manifest.mf b/ExifParser/manifest.mf index dbf05fee2fd8b097335603a77174c78594d4dcd8..44ad288f513e993d0abd4ce930ea4d459b39942e 100644 --- a/ExifParser/manifest.mf +++ b/ExifParser/manifest.mf @@ -1,6 +1,6 @@ -Manifest-Version: 1.0 -AutoUpdate-Show-In-Client: true -OpenIDE-Module: org.sleuthkit.autopsy.exifparser/3 -OpenIDE-Module-Implementation-Version: 9 -OpenIDE-Module-Layer: org/sleuthkit/autopsy/exifparser/layer.xml -OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/exifparser/Bundle.properties +Manifest-Version: 1.0 +AutoUpdate-Show-In-Client: true +OpenIDE-Module: org.sleuthkit.autopsy.exifparser/3 +OpenIDE-Module-Implementation-Version: 9 +OpenIDE-Module-Layer: org/sleuthkit/autopsy/exifparser/layer.xml +OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/exifparser/Bundle.properties diff --git a/HashDatabase/manifest.mf b/HashDatabase/manifest.mf index ba201a294caf462b8953a74211a3679dd5f125aa..b8c105413cd0db1a05e3e42973312ec7010898a5 100644 --- a/HashDatabase/manifest.mf +++ b/HashDatabase/manifest.mf @@ -1,7 +1,7 @@ -Manifest-Version: 1.0 -AutoUpdate-Show-In-Client: true -OpenIDE-Module: org.sleuthkit.autopsy.hashdatabase/3 -OpenIDE-Module-Implementation-Version: 9 -OpenIDE-Module-Layer: org/sleuthkit/autopsy/hashdatabase/layer.xml -OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/hashdatabase/Bundle.properties - +Manifest-Version: 1.0 +AutoUpdate-Show-In-Client: true +OpenIDE-Module: org.sleuthkit.autopsy.hashdatabase/3 +OpenIDE-Module-Implementation-Version: 9 +OpenIDE-Module-Layer: org/sleuthkit/autopsy/hashdatabase/layer.xml +OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/hashdatabase/Bundle.properties + diff --git a/HashDatabase/nbproject/project.properties b/HashDatabase/nbproject/project.properties index 88f75855cb825089c512f4e7f5780fa66ee96929..e633758f8a57b3f761b606cb4527d39b42809807 100644 --- a/HashDatabase/nbproject/project.properties +++ b/HashDatabase/nbproject/project.properties @@ -1,6 +1,6 @@ -javac.source=1.7 -javac.compilerargs=-Xlint -Xlint:-serial -license.file=../LICENSE-2.0.txt -nbm.homepage=http://www.sleuthkit.org/autopsy/ -nbm.needs.restart=true -spec.version.base=1.3 +javac.source=1.7 +javac.compilerargs=-Xlint -Xlint:-serial +license.file=../LICENSE-2.0.txt +nbm.homepage=http://www.sleuthkit.org/autopsy/ +nbm.needs.restart=true +spec.version.base=1.3 diff --git 
a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/Bundle.properties b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/Bundle.properties index a9aead73cb45f15eb909e29f016265046c785c74..bf9c210f1a2af334e6ae1295f6838ce0587fc941 100644 --- a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/Bundle.properties +++ b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/Bundle.properties @@ -1,64 +1,64 @@ -OpenIDE-Module-Display-Category=Ingest Module -OpenIDE-Module-Long-Description=\ - Hash Database ingest module. \n\n\ - The ingest module analyzes files in the disk image and marks them as "known" (based on NSRL database lookup for "known" files) and "bad / interesting" (based on one or more databases supplied by the user).\n\n\ - The module also contains additional non-ingest tools that are integrated in the GUI, such as file lookup by hash and hash database configuration. -OpenIDE-Module-Name=HashDatabase -HashDbSimplePanel.knownLabel.text=NSRL Database: -HashDbSimplePanel.notableLabel.text=Known Bad Database(s): -HashDbSimplePanel.knownValLabel.text=- -HashDbSimplePanel.notableValLabel.text=- -HashDbSimplePanel.jLabel1.text=Enable known bad databases for ingest: -HashDbAddDatabaseDialog.cancelButton.text=Cancel -HashDbAddDatabaseDialog.okButton.text=OK -HashDbAddDatabaseDialog.nsrlRadioButton.text=NSRL -HashDbAddDatabaseDialog.knownBadRadioButton.text=Known Bad -HashDbAddDatabaseDialog.databasePathTextField.text= -HashDbAddDatabaseDialog.browseButton.text=Browse -HashDbAddDatabaseDialog.jLabel1.text=Enter the name of the database: -HashDbAddDatabaseDialog.databaseNameTextField.text= -HashDbAddDatabaseDialog.jLabel2.text=Select the type of database: -HashDbAddDatabaseDialog.useForIngestCheckbox.text=Enable for ingest -HashDbAddDatabaseDialog.sendInboxMessagesCheckbox.text=Enable sending messages to inbox during ingest -HashDbSearchPanel.hashTable.columnModel.title0=MD5 Hashes -HashDbSearchPanel.hashTable.columnModel.title3=Title 4 -HashDbSearchPanel.hashTable.columnModel.title2=Title 3 -HashDbSearchPanel.hashTable.columnModel.title1=Title 2 -HashDbSearchPanel.addButton.text=Add Hash -HashDbSearchPanel.hashField.text= -HashDbSearchPanel.hashLabel.text=MD5 hash: -HashDbSearchPanel.searchButton.text=Search -HashDbSearchPanel.removeButton.text=Remove Selected -HashDbSearchPanel.titleLabel.text=Search for files with the following MD5 hash(es): -HashDbSearchPanel.errorField.text=Error: Not all files have been hashed. -HashDbSearchPanel.saveBox.text=Remember Hashes -HashDbSearchPanel.cancelButton.text=Cancel -HashDbSimplePanel.calcHashesButton.text=Calculate hashes even if no hash database is selected -HashDbSimplePanel.nsrlDbLabel.text=NSRL Database: -HashDbSimplePanel.nsrlDbLabelVal.text=- -HashDbManagementPanel.hashDbIndexStatusLabel.text=No database selected -HashDbManagementPanel.jLabel2.text=Name: -HashDbManagementPanel.showInboxMessagesCheckBox.text=Enable sending messages to inbox during ingest -HashDbManagementPanel.useForIngestCheckbox.text=Enable for ingest -HashDbManagementPanel.indexButton.text=Index -HashDbManagementPanel.indexLabel.text=Index Status: -HashDbManagementPanel.optionsLabel.text=Options -HashDbManagementPanel.jLabel4.text=Location: -HashDbManagementPanel.jLabel6.text=Type: -HashDbManagementPanel.ingestWarningLabel.text=Ingest is ongoing, some settings will be unavailable until it finishes. 
-HashDbManagementPanel.hashDbTypeLabel.text=No database selected -HashDbManagementPanel.typeLabel.text=Type: -HashDbManagementPanel.deleteButton.text=Delete Database -HashDbManagementPanel.importButton.text=Import Database -HashDbManagementPanel.hashDbNameLabel.text=No database selected -HashDbManagementPanel.nameLabel.text=Name: -HashDbManagementPanel.jButton3.text=Import Database -HashDbManagementPanel.locationLabel.text=Location: -HashDbManagementPanel.hashDbLocationLabel.text=No database selected -HashDbManagementPanel.informationLabel.text=Information -HashDbManagementPanel.hashDatabasesLabel.text=Hash Databases: -OpenIDE-Module-Short-Description=Hash Database Ingest Module and hash db tools -ModalNoButtons.CURRENTLYON_LABEL.text=Currently Indexing x of y -ModalNoButtons.GO_GET_COFFEE_LABEL.text=Hash databases are currently being indexed, this may take some time. -ModalNoButtons.CURRENTDB_LABEL.text=(CurrentDb) -ModalNoButtons.CANCEL_BUTTON.text=Cancel +OpenIDE-Module-Display-Category=Ingest Module +OpenIDE-Module-Long-Description=\ + Hash Database ingest module. \n\n\ + The ingest module analyzes files in the disk image and marks them as "known" (based on NSRL database lookup for "known" files) and "bad / interesting" (based on one or more databases supplied by the user).\n\n\ + The module also contains additional non-ingest tools that are integrated in the GUI, such as file lookup by hash and hash database configuration. +OpenIDE-Module-Name=HashDatabase +HashDbSimplePanel.knownLabel.text=NSRL Database: +HashDbSimplePanel.notableLabel.text=Known Bad Database(s): +HashDbSimplePanel.knownValLabel.text=- +HashDbSimplePanel.notableValLabel.text=- +HashDbSimplePanel.jLabel1.text=Enable known bad databases for ingest: +HashDbAddDatabaseDialog.cancelButton.text=Cancel +HashDbAddDatabaseDialog.okButton.text=OK +HashDbAddDatabaseDialog.nsrlRadioButton.text=NSRL +HashDbAddDatabaseDialog.knownBadRadioButton.text=Known Bad +HashDbAddDatabaseDialog.databasePathTextField.text= +HashDbAddDatabaseDialog.browseButton.text=Browse +HashDbAddDatabaseDialog.jLabel1.text=Enter the name of the database: +HashDbAddDatabaseDialog.databaseNameTextField.text= +HashDbAddDatabaseDialog.jLabel2.text=Select the type of database: +HashDbAddDatabaseDialog.useForIngestCheckbox.text=Enable for ingest +HashDbAddDatabaseDialog.sendInboxMessagesCheckbox.text=Enable sending messages to inbox during ingest +HashDbSearchPanel.hashTable.columnModel.title0=MD5 Hashes +HashDbSearchPanel.hashTable.columnModel.title3=Title 4 +HashDbSearchPanel.hashTable.columnModel.title2=Title 3 +HashDbSearchPanel.hashTable.columnModel.title1=Title 2 +HashDbSearchPanel.addButton.text=Add Hash +HashDbSearchPanel.hashField.text= +HashDbSearchPanel.hashLabel.text=MD5 hash: +HashDbSearchPanel.searchButton.text=Search +HashDbSearchPanel.removeButton.text=Remove Selected +HashDbSearchPanel.titleLabel.text=Search for files with the following MD5 hash(es): +HashDbSearchPanel.errorField.text=Error: Not all files have been hashed. 
+HashDbSearchPanel.saveBox.text=Remember Hashes +HashDbSearchPanel.cancelButton.text=Cancel +HashDbSimplePanel.calcHashesButton.text=Calculate hashes even if no hash database is selected +HashDbSimplePanel.nsrlDbLabel.text=NSRL Database: +HashDbSimplePanel.nsrlDbLabelVal.text=- +HashDbManagementPanel.hashDbIndexStatusLabel.text=No database selected +HashDbManagementPanel.jLabel2.text=Name: +HashDbManagementPanel.showInboxMessagesCheckBox.text=Enable sending messages to inbox during ingest +HashDbManagementPanel.useForIngestCheckbox.text=Enable for ingest +HashDbManagementPanel.indexButton.text=Index +HashDbManagementPanel.indexLabel.text=Index Status: +HashDbManagementPanel.optionsLabel.text=Options +HashDbManagementPanel.jLabel4.text=Location: +HashDbManagementPanel.jLabel6.text=Type: +HashDbManagementPanel.ingestWarningLabel.text=Ingest is ongoing, some settings will be unavailable until it finishes. +HashDbManagementPanel.hashDbTypeLabel.text=No database selected +HashDbManagementPanel.typeLabel.text=Type: +HashDbManagementPanel.deleteButton.text=Delete Database +HashDbManagementPanel.importButton.text=Import Database +HashDbManagementPanel.hashDbNameLabel.text=No database selected +HashDbManagementPanel.nameLabel.text=Name: +HashDbManagementPanel.jButton3.text=Import Database +HashDbManagementPanel.locationLabel.text=Location: +HashDbManagementPanel.hashDbLocationLabel.text=No database selected +HashDbManagementPanel.informationLabel.text=Information +HashDbManagementPanel.hashDatabasesLabel.text=Hash Databases: +OpenIDE-Module-Short-Description=Hash Database Ingest Module and hash db tools +ModalNoButtons.CURRENTLYON_LABEL.text=Currently Indexing x of y +ModalNoButtons.GO_GET_COFFEE_LABEL.text=Hash databases are currently being indexed, this may take some time. +ModalNoButtons.CURRENTDB_LABEL.text=(CurrentDb) +ModalNoButtons.CANCEL_BUTTON.text=Cancel diff --git a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDb.java b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDb.java index 9a08234eab73939900f40c034cd063db47d9da80..47d3e58c520c874df3aa602729ab817360e4b415 100644 --- a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDb.java +++ b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDb.java @@ -1,304 +1,304 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2011 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.sleuthkit.autopsy.hashdatabase; - -import java.beans.PropertyChangeListener; -import java.beans.PropertyChangeSupport; -import java.io.File; -import java.util.List; -import java.util.logging.Level; -import javax.swing.SwingWorker; -import org.netbeans.api.progress.ProgressHandle; -import org.netbeans.api.progress.ProgressHandleFactory; -import org.openide.util.Cancellable; -import org.sleuthkit.autopsy.coreutils.Logger; -import org.sleuthkit.datamodel.SleuthkitJNI; -import org.sleuthkit.datamodel.TskException; - -/** - * Hash database representation of NSRL and Known Bad hash databases - * with indexing capability - * - */ -public class HashDb implements Comparable<HashDb> { - - enum EVENT {INDEXING_DONE }; - private final PropertyChangeSupport pcs = new PropertyChangeSupport(this); - - - public enum DBType{ - NSRL("NSRL"), KNOWN_BAD("Known Bad"); - - private String displayName; - - private DBType(String displayName) { - this.displayName = displayName; - } - - public String getDisplayName() { - return this.displayName; - } - } - - // Suffix added to the end of a database name to get its index file - private static final String INDEX_SUFFIX = "-md5.idx"; - - private String name; - private List<String> databasePaths; // TODO: Length limited to one for now... - private boolean useForIngest; - private boolean showInboxMessages; - private boolean indexing; - private DBType type; - - public HashDb(String name, List<String> databasePaths, boolean useForIngest, boolean showInboxMessages, DBType type) { - this.name = name; - this.databasePaths = databasePaths; - this.useForIngest = useForIngest; - this.showInboxMessages = showInboxMessages; - this.type = type; - this.indexing = false; - } - - void addPropertyChangeListener(PropertyChangeListener pcl) { - pcs.addPropertyChangeListener(pcl); - } - - void removePropertyChangeListener(PropertyChangeListener pcl) { - pcs.removePropertyChangeListener(pcl); - } - - boolean getUseForIngest() { - return useForIngest; - } - - boolean getShowInboxMessages() { - return showInboxMessages; - } - - DBType getDbType() { - return type; - } - - String getName() { - return name; - } - - List<String> getDatabasePaths() { - return databasePaths; - } - - void setUseForIngest(boolean useForIngest) { - this.useForIngest = useForIngest; - } - - void setShowInboxMessages(boolean showInboxMessages) { - this.showInboxMessages = showInboxMessages; - } - - void setName(String name) { - this.name = name; - } - - void setDatabasePaths(List<String> databasePaths) { - this.databasePaths = databasePaths; - } - - void setDbType(DBType type) { - this.type = type; - } - - /** - * Checks if the database exists. - * @return true if a file exists at the database path, else false - */ - boolean databaseExists() { - return databaseFile().exists(); - } - - /** - * Checks if Sleuth Kit can open the index for the database path. - * @return true if the index was found and opened successfully, else false - */ - boolean indexExists() { - try { - return hasIndex(databasePaths.get(0)); // TODO: support multiple paths - } catch (TskException ex) { - Logger.getLogger(this.getClass().getName()).log(Level.WARNING, "Error checking if index exists.", ex); - return false; - } - } - - /** - * Gets the database file. 
- * @return a File initialized with the database path - */ - File databaseFile() { - return new File(databasePaths.get(0)); // TODO: support multiple paths - } - - /** - * Gets the index file - * @return a File initialized with an index path derived from the database - * path - */ - File indexFile() { - return new File(toIndexPath(databasePaths.get(0))); // TODO: support multiple paths - } - - /** - * Checks if the index file is older than the database file - * @return true if there is are files at the index path and the database - * path, and the index file has an older modified-time than the database - * file, else false - */ - boolean isOutdated() { - File i = indexFile(); - File db = databaseFile(); - - return i.exists() && db.exists() && isOlderThan(i, db); - } - - /** - * Checks if the database is being indexed - */ - boolean isIndexing() { - return indexing; - } - - /** - * Returns the status of the HashDb as determined from indexExists(), - * databaseExists(), and isOutdated() - * @return IndexStatus enum according to their definitions - */ - IndexStatus status() { - boolean i = this.indexExists(); - boolean db = this.databaseExists(); - - if(indexing) - return IndexStatus.INDEXING; - if (i) { - if (db) { - return this.isOutdated() ? IndexStatus.INDEX_OUTDATED : IndexStatus.INDEX_CURRENT; - } else { - return IndexStatus.NO_DB; - } - } else { - return db ? IndexStatus.NO_INDEX : IndexStatus.NONE; - } - } - - /** - * Tries to index the database (overwrites any existing index) - * @throws TskException if an error occurs in the SleuthKit bindings - */ - void createIndex() throws TskException { - indexing = true; - CreateIndex creator = new CreateIndex(); - creator.execute(); - } - - /** - * Checks if one file is older than an other - * @param a first file - * @param b second file - * @return true if the first file's last modified data is before the second - * file's last modified date - */ - private static boolean isOlderThan(File a, File b) { - return a.lastModified() < b.lastModified(); - } - - /** - * Determines if a path points to an index by checking the suffix - * @param path - * @return true if index - */ - static boolean isIndexPath(String path) { - return path.endsWith(INDEX_SUFFIX); - } - - /** - * Derives database path from an image path by removing the suffix. - * @param indexPath - * @return - */ - static String toDatabasePath(String indexPath) { - return indexPath.substring(0, indexPath.lastIndexOf(INDEX_SUFFIX)); - } - - /** - * Derives image path from an database path by appending the suffix. 
- * @param databasePath - * @return - */ - static String toIndexPath(String databasePath) { - return databasePath.concat(INDEX_SUFFIX); - } - - /** - * Calls Sleuth Kit method via JNI to determine whether there is an - * index for the given path - * @param databasePath path Path for the database the index is of - * (database doesn't have to actually exist)' - * @return true if index exists - * @throws TskException if there is an error in the JNI call - */ - static boolean hasIndex(String databasePath) throws TskException { - return SleuthkitJNI.lookupIndexExists(databasePath); - } - - @Override - public int compareTo(HashDb o) { - return this.name.compareTo(o.name); - } - - /* Thread that creates a database's index */ - private class CreateIndex extends SwingWorker<Object,Void> { - - private ProgressHandle progress; - - CreateIndex(){}; - - @Override - protected Object doInBackground() throws Exception { - progress = ProgressHandleFactory.createHandle("Indexing " + name); - - /** We need proper cancel support in TSK to make the task cancellable - new Cancellable() { - Override - public boolean cancel() { - return CreateIndex.this.cancel(true); - } - }); - */ - progress.start(); - progress.switchToIndeterminate(); - SleuthkitJNI.createLookupIndex(databasePaths.get(0)); - return null; - } - - /* clean up or start the worker threads */ - @Override - protected void done() { - indexing = false; - progress.finish(); - pcs.firePropertyChange(EVENT.INDEXING_DONE.toString(), null, name); - } - } +/* + * Autopsy Forensic Browser + * + * Copyright 2011 Basis Technology Corp. + * Contact: carrier <at> sleuthkit <dot> org + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.sleuthkit.autopsy.hashdatabase; + +import java.beans.PropertyChangeListener; +import java.beans.PropertyChangeSupport; +import java.io.File; +import java.util.List; +import java.util.logging.Level; +import javax.swing.SwingWorker; +import org.netbeans.api.progress.ProgressHandle; +import org.netbeans.api.progress.ProgressHandleFactory; +import org.openide.util.Cancellable; +import org.sleuthkit.autopsy.coreutils.Logger; +import org.sleuthkit.datamodel.SleuthkitJNI; +import org.sleuthkit.datamodel.TskException; + +/** + * Hash database representation of NSRL and Known Bad hash databases + * with indexing capability + * + */ +public class HashDb implements Comparable<HashDb> { + + enum EVENT {INDEXING_DONE }; + private final PropertyChangeSupport pcs = new PropertyChangeSupport(this); + + + public enum DBType{ + NSRL("NSRL"), KNOWN_BAD("Known Bad"); + + private String displayName; + + private DBType(String displayName) { + this.displayName = displayName; + } + + public String getDisplayName() { + return this.displayName; + } + } + + // Suffix added to the end of a database name to get its index file + private static final String INDEX_SUFFIX = "-md5.idx"; + + private String name; + private List<String> databasePaths; // TODO: Length limited to one for now... 
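/*
 * A quick, self-contained illustration (not part of this patch) of the
 * "-md5.idx" naming convention implemented by isIndexPath(),
 * toDatabasePath(), and toIndexPath() in this class. The sample path
 * below is made up.
 */
public final class IndexSuffixDemo {
    private static final String INDEX_SUFFIX = "-md5.idx";

    public static void main(String[] args) {
        String databasePath = "/cases/hashsets/NSRL.txt";
        String indexPath = databasePath.concat(INDEX_SUFFIX);               // toIndexPath()
        boolean looksLikeIndex = indexPath.endsWith(INDEX_SUFFIX);          // isIndexPath()
        String recovered = indexPath.substring(0,
                indexPath.lastIndexOf(INDEX_SUFFIX));                       // toDatabasePath()

        // Prints: /cases/hashsets/NSRL.txt-md5.idx true true
        System.out.println(indexPath + " " + looksLikeIndex + " " + recovered.equals(databasePath));
    }
}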
+ private boolean useForIngest; + private boolean showInboxMessages; + private boolean indexing; + private DBType type; + + public HashDb(String name, List<String> databasePaths, boolean useForIngest, boolean showInboxMessages, DBType type) { + this.name = name; + this.databasePaths = databasePaths; + this.useForIngest = useForIngest; + this.showInboxMessages = showInboxMessages; + this.type = type; + this.indexing = false; + } + + void addPropertyChangeListener(PropertyChangeListener pcl) { + pcs.addPropertyChangeListener(pcl); + } + + void removePropertyChangeListener(PropertyChangeListener pcl) { + pcs.removePropertyChangeListener(pcl); + } + + boolean getUseForIngest() { + return useForIngest; + } + + boolean getShowInboxMessages() { + return showInboxMessages; + } + + DBType getDbType() { + return type; + } + + String getName() { + return name; + } + + List<String> getDatabasePaths() { + return databasePaths; + } + + void setUseForIngest(boolean useForIngest) { + this.useForIngest = useForIngest; + } + + void setShowInboxMessages(boolean showInboxMessages) { + this.showInboxMessages = showInboxMessages; + } + + void setName(String name) { + this.name = name; + } + + void setDatabasePaths(List<String> databasePaths) { + this.databasePaths = databasePaths; + } + + void setDbType(DBType type) { + this.type = type; + } + + /** + * Checks if the database exists. + * @return true if a file exists at the database path, else false + */ + boolean databaseExists() { + return databaseFile().exists(); + } + + /** + * Checks if Sleuth Kit can open the index for the database path. + * @return true if the index was found and opened successfully, else false + */ + boolean indexExists() { + try { + return hasIndex(databasePaths.get(0)); // TODO: support multiple paths + } catch (TskException ex) { + Logger.getLogger(this.getClass().getName()).log(Level.WARNING, "Error checking if index exists.", ex); + return false; + } + } + + /** + * Gets the database file. + * @return a File initialized with the database path + */ + File databaseFile() { + return new File(databasePaths.get(0)); // TODO: support multiple paths + } + + /** + * Gets the index file + * @return a File initialized with an index path derived from the database + * path + */ + File indexFile() { + return new File(toIndexPath(databasePaths.get(0))); // TODO: support multiple paths + } + + /** + * Checks if the index file is older than the database file + * @return true if there is are files at the index path and the database + * path, and the index file has an older modified-time than the database + * file, else false + */ + boolean isOutdated() { + File i = indexFile(); + File db = databaseFile(); + + return i.exists() && db.exists() && isOlderThan(i, db); + } + + /** + * Checks if the database is being indexed + */ + boolean isIndexing() { + return indexing; + } + + /** + * Returns the status of the HashDb as determined from indexExists(), + * databaseExists(), and isOutdated() + * @return IndexStatus enum according to their definitions + */ + IndexStatus status() { + boolean i = this.indexExists(); + boolean db = this.databaseExists(); + + if(indexing) + return IndexStatus.INDEXING; + if (i) { + if (db) { + return this.isOutdated() ? IndexStatus.INDEX_OUTDATED : IndexStatus.INDEX_CURRENT; + } else { + return IndexStatus.NO_DB; + } + } else { + return db ? 
IndexStatus.NO_INDEX : IndexStatus.NONE; + } + } + + /** + * Tries to index the database (overwrites any existing index) + * @throws TskException if an error occurs in the SleuthKit bindings + */ + void createIndex() throws TskException { + indexing = true; + CreateIndex creator = new CreateIndex(); + creator.execute(); + } + + /** + * Checks if one file is older than an other + * @param a first file + * @param b second file + * @return true if the first file's last modified data is before the second + * file's last modified date + */ + private static boolean isOlderThan(File a, File b) { + return a.lastModified() < b.lastModified(); + } + + /** + * Determines if a path points to an index by checking the suffix + * @param path + * @return true if index + */ + static boolean isIndexPath(String path) { + return path.endsWith(INDEX_SUFFIX); + } + + /** + * Derives database path from an image path by removing the suffix. + * @param indexPath + * @return + */ + static String toDatabasePath(String indexPath) { + return indexPath.substring(0, indexPath.lastIndexOf(INDEX_SUFFIX)); + } + + /** + * Derives image path from an database path by appending the suffix. + * @param databasePath + * @return + */ + static String toIndexPath(String databasePath) { + return databasePath.concat(INDEX_SUFFIX); + } + + /** + * Calls Sleuth Kit method via JNI to determine whether there is an + * index for the given path + * @param databasePath path Path for the database the index is of + * (database doesn't have to actually exist)' + * @return true if index exists + * @throws TskException if there is an error in the JNI call + */ + static boolean hasIndex(String databasePath) throws TskException { + return SleuthkitJNI.lookupIndexExists(databasePath); + } + + @Override + public int compareTo(HashDb o) { + return this.name.compareTo(o.name); + } + + /* Thread that creates a database's index */ + private class CreateIndex extends SwingWorker<Object,Void> { + + private ProgressHandle progress; + + CreateIndex(){}; + + @Override + protected Object doInBackground() throws Exception { + progress = ProgressHandleFactory.createHandle("Indexing " + name); + + /** We need proper cancel support in TSK to make the task cancellable + new Cancellable() { + Override + public boolean cancel() { + return CreateIndex.this.cancel(true); + } + }); + */ + progress.start(); + progress.switchToIndeterminate(); + SleuthkitJNI.createLookupIndex(databasePaths.get(0)); + return null; + } + + /* clean up or start the worker threads */ + @Override + protected void done() { + indexing = false; + progress.finish(); + pcs.firePropertyChange(EVENT.INDEXING_DONE.toString(), null, name); + } + } } \ No newline at end of file diff --git a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDbXML.java b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDbXML.java index 9ddc638b9ee381db8b78ca70216cdfb9c8af3a1d..b0040ff080e3b037e81d0901f6fa94bf2fea8371 100644 --- a/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDbXML.java +++ b/HashDatabase/src/org/sleuthkit/autopsy/hashdatabase/HashDbXML.java @@ -1,425 +1,425 @@ -/* - * Autopsy Forensic Browser - * - * Copyright 2011 Basis Technology Corp. - * Contact: carrier <at> sleuthkit <dot> org - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.sleuthkit.autopsy.hashdatabase; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.logging.Level; -import javax.swing.JFileChooser; -import javax.swing.JOptionPane; -import javax.swing.filechooser.FileNameExtensionFilter; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import org.sleuthkit.autopsy.coreutils.Logger; -import org.sleuthkit.autopsy.coreutils.PlatformUtil; -import org.sleuthkit.autopsy.coreutils.XMLUtil; -import org.sleuthkit.autopsy.hashdatabase.HashDb.DBType; -import org.sleuthkit.datamodel.SleuthkitJNI; -import org.sleuthkit.datamodel.TskCoreException; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.NodeList; - -public class HashDbXML { - private static final String ROOT_EL = "hash_sets"; - private static final String SET_EL = "hash_set"; - private static final String SET_NAME_ATTR = "name"; - private static final String SET_TYPE_ATTR = "type"; - private static final String SET_USE_FOR_INGEST_ATTR = "use_for_ingest"; - private static final String SET_SHOW_INBOX_MESSAGES = "show_inbox_messages"; - private static final String PATH_EL = "hash_set_path"; - private static final String PATH_NUMBER_ATTR = "number"; - private static final String CUR_HASHSETS_FILE_NAME = "hashsets.xml"; - private static final String XSDFILE = "HashsetsSchema.xsd"; - private static final String ENCODING = "UTF-8"; - private static final String CUR_HASHSET_FILE = PlatformUtil.getUserConfigDirectory() + File.separator + CUR_HASHSETS_FILE_NAME; - private static final String SET_CALC = "hash_calculate"; - private static final String SET_VALUE = "value"; - private static final Logger logger = Logger.getLogger(HashDbXML.class.getName()); - private static HashDbXML currentInstance; - - private List<HashDb> knownBadSets; - private HashDb nsrlSet; - private String xmlFile; - private boolean calculate; - - private HashDbXML(String xmlFile) { - knownBadSets = new ArrayList<HashDb>(); - this.xmlFile = xmlFile; - } - - /** - * get instance for managing the current keyword list of the application - */ - static synchronized HashDbXML getCurrent() { - if (currentInstance == null) { - currentInstance = new HashDbXML(CUR_HASHSET_FILE); - currentInstance.reload(); - } - return currentInstance; - } - - /** - * Get the hash sets - */ - public List<HashDb> getAllSets() { - List<HashDb> ret = new ArrayList<HashDb>(); - if(nsrlSet != null) { - ret.add(nsrlSet); - } - ret.addAll(knownBadSets); - return ret; - } - - /** - * Get the Known Bad sets - */ - public List<HashDb> getKnownBadSets() { - return knownBadSets; - } - - /** - * Get the NSRL set - */ - public HashDb getNSRLSet() { - return nsrlSet; - } - - /** - * Add a known bad hash set - */ - public void addKnownBadSet(HashDb set) { - knownBadSets.add(set); - //save(); - } - - /** - * Add a known bad hash set - */ - public void addKnownBadSet(int index, HashDb set) { - knownBadSets.add(index, set); - //save(); - } - - /** - * Set the 
NSRL hash set (override old set) - */ - public void setNSRLSet(HashDb set) { - this.nsrlSet = set; - //save(); - } - - /** - * Remove a hash known bad set - */ - public void removeKnownBadSetAt(int index) { - knownBadSets.remove(index); - //save(); - } - - /** - * Remove the NSRL database - */ - public void removeNSRLSet() { - this.nsrlSet = null; - //save(); - } - - /** - * load the file or create new - */ - public void reload() { - boolean created = false; - - //TODO clearing the list causes a bug: we lose track of the state - //whether db is being indexed, we should somehow preserve the state when loading new HashDb objects - - knownBadSets.clear(); - nsrlSet = null; - - if (!this.setsFileExists()) { - //create new if it doesn't exist - save(); - created = true; - } - - //load, if fails to load create new; save regardless - load(); - if (!created) { - //create new if failed to load - save(); - } - } - - /** - * Sets the local variable calculate to the given boolean. - * @param set the state to make calculate - */ - public void setCalculate(boolean set) { - this.calculate = set; - //save(); - } - - /** - * Returns the value of the local boolean calculate. - * @return true if calculate is true, false otherwise - */ - public boolean getCalculate() { - return this.calculate; - } - - /** - * writes out current sets file replacing the last one - */ - public boolean save() { - boolean success = false; - - DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance(); - - try { - DocumentBuilder docBuilder = dbfac.newDocumentBuilder(); - Document doc = docBuilder.newDocument(); - - Element rootEl = doc.createElement(ROOT_EL); - doc.appendChild(rootEl); - - for (HashDb set : knownBadSets) { - String useForIngest = Boolean.toString(set.getUseForIngest()); - String showInboxMessages = Boolean.toString(set.getShowInboxMessages()); - List<String> paths = set.getDatabasePaths(); - String type = DBType.KNOWN_BAD.toString(); - - Element setEl = doc.createElement(SET_EL); - setEl.setAttribute(SET_NAME_ATTR, set.getName()); - setEl.setAttribute(SET_TYPE_ATTR, type); - setEl.setAttribute(SET_USE_FOR_INGEST_ATTR, useForIngest); - setEl.setAttribute(SET_SHOW_INBOX_MESSAGES, showInboxMessages); - - for (int i = 0; i < paths.size(); i++) { - String path = paths.get(i); - Element pathEl = doc.createElement(PATH_EL); - pathEl.setAttribute(PATH_NUMBER_ATTR, Integer.toString(i)); - pathEl.setTextContent(path); - setEl.appendChild(pathEl); - } - rootEl.appendChild(setEl); - } - - if(nsrlSet != null) { - String useForIngest = Boolean.toString(nsrlSet.getUseForIngest()); - String showInboxMessages = Boolean.toString(nsrlSet.getShowInboxMessages()); - List<String> paths = nsrlSet.getDatabasePaths(); - String type = DBType.NSRL.toString(); - - Element setEl = doc.createElement(SET_EL); - setEl.setAttribute(SET_NAME_ATTR, nsrlSet.getName()); - setEl.setAttribute(SET_TYPE_ATTR, type); - setEl.setAttribute(SET_USE_FOR_INGEST_ATTR, useForIngest); - setEl.setAttribute(SET_SHOW_INBOX_MESSAGES, showInboxMessages); - - for (int i = 0; i < paths.size(); i++) { - String path = paths.get(i); - Element pathEl = doc.createElement(PATH_EL); - pathEl.setAttribute(PATH_NUMBER_ATTR, Integer.toString(i)); - pathEl.setTextContent(path); - setEl.appendChild(pathEl); - } - rootEl.appendChild(setEl); - } - - String calcValue = Boolean.toString(calculate); - Element setCalc = doc.createElement(SET_CALC); - setCalc.setAttribute(SET_VALUE, calcValue); - rootEl.appendChild(setCalc); - - success = XMLUtil.saveDoc(HashDbXML.class, xmlFile, 
ENCODING, doc); - } catch (ParserConfigurationException e) { - logger.log(Level.SEVERE, "Error saving hash sets: can't initialize parser.", e); - } - return success; - } - - /** - * load and parse XML, then dispose - */ - public boolean load() { - final Document doc = XMLUtil.loadDoc(HashDbXML.class, xmlFile, XSDFILE); - if (doc == null) { - return false; - } - - Element root = doc.getDocumentElement(); - if (root == null) { - logger.log(Level.SEVERE, "Error loading hash sets: invalid file format."); - return false; - } - NodeList setsNList = root.getElementsByTagName(SET_EL); - int numSets = setsNList.getLength(); - if(numSets==0) { - logger.log(Level.WARNING, "No element hash_set exists."); - } - for (int i = 0; i < numSets; ++i) { - Element setEl = (Element) setsNList.item(i); - final String name = setEl.getAttribute(SET_NAME_ATTR); - final String type = setEl.getAttribute(SET_TYPE_ATTR); - final String useForIngest = setEl.getAttribute(SET_USE_FOR_INGEST_ATTR); - final String showInboxMessages = setEl.getAttribute(SET_SHOW_INBOX_MESSAGES); - Boolean useForIngestBool = Boolean.parseBoolean(useForIngest); - Boolean showInboxMessagesBool = Boolean.parseBoolean(showInboxMessages); - List<String> paths = new ArrayList<String>(); - - // Parse all paths - NodeList pathsNList = setEl.getElementsByTagName(PATH_EL); - final int numPaths = pathsNList.getLength(); - for (int j = 0; j < numPaths; ++j) { - Element pathEl = (Element) pathsNList.item(j); - String number = pathEl.getAttribute(PATH_NUMBER_ATTR); - String path = pathEl.getTextContent(); - - // If either the database or it's index exist - File database = new File(path); - File index = new File(HashDb.toIndexPath(path)); - if(database.exists() || index.exists()) { - paths.add(path); - } else { - // Ask for new path - int ret = JOptionPane.showConfirmDialog(null, "Database " + name + " could not be found at location\n" - + path + "\n" - + " Would you like to search for the file?", "Missing Database", JOptionPane.YES_NO_OPTION); - if (ret == JOptionPane.YES_OPTION) { - String filePath = searchForFile(name); - if(filePath!=null) { - paths.add(filePath); - } - } - } - } - - // Check everything was properly set - if(name.isEmpty()) { - logger.log(Level.WARNING, "Name was not set for hash_set at index {0}.", i); - } - if(type.isEmpty()) { - logger.log(Level.SEVERE, "Type was not set for hash_set at index {0}, cannot make instance of HashDb class.", i); - return false; // exit because this causes a fatal error - } - if(useForIngest.isEmpty()) { - logger.log(Level.WARNING, "UseForIngest was not set for hash_set at index {0}.", i); - } - if(showInboxMessages.isEmpty()) { - logger.log(Level.WARNING, "ShowInboxMessages was not set for hash_set at index {0}.", i); - } - - if(paths.isEmpty()) { - logger.log(Level.WARNING, "No paths were set for hash_set at index {0}. 
Removing the database.", i); - } else { - // No paths for this entry, the user most likely declined to search for them - DBType typeDBType = DBType.valueOf(type); - HashDb set = new HashDb(name, paths, useForIngestBool, showInboxMessagesBool, typeDBType); - - if(typeDBType == DBType.KNOWN_BAD) { - knownBadSets.add(set); - } else if(typeDBType == DBType.NSRL) { - this.nsrlSet = set; - } - } - } - - NodeList calcList = root.getElementsByTagName(SET_CALC); - int numCalc = calcList.getLength(); // Shouldn't be more than 1 - if(numCalc==0) { - logger.log(Level.WARNING, "No element hash_calculate exists."); - } - for(int i=0; i<numCalc; i++) { - Element calcEl = (Element) calcList.item(i); - final String value = calcEl.getAttribute(SET_VALUE); - calculate = Boolean.parseBoolean(value); - } - return true; - } - - /** - * Ask the user to browse to a new Hash Database file with the same database - * name as the one provided. If the names do not match, the database cannot - * be added. If the user cancels the search, return null, meaning the user - * would like to remove the entry for the missing database. - * - * @param name the name of the database to add - * @return the file path to the new database, or null if the user wants to - * delete the old database - */ - private String searchForFile(String name) { - // Initialize the file chooser and only allow hash databases to be opened - JFileChooser fc = new JFileChooser(); - fc.setDragEnabled(false); - fc.setFileSelectionMode(JFileChooser.FILES_ONLY); - String[] EXTENSION = new String[] { "txt", "idx", "hash", "Hash" }; - FileNameExtensionFilter filter = new FileNameExtensionFilter( - "Hash Database File", EXTENSION); - fc.setFileFilter(filter); - fc.setMultiSelectionEnabled(false); - - int retval = fc.showOpenDialog(null); - // If the user selects an appropriate file - if (retval == JFileChooser.APPROVE_OPTION) { - File f = fc.getSelectedFile(); - try { - String filePath = f.getCanonicalPath(); - if (HashDb.isIndexPath(filePath)) { - filePath = HashDb.toDatabasePath(filePath); - } - String derivedName = SleuthkitJNI.getDatabaseName(filePath); - // If the database has the same name as before, return it - if(derivedName.equals(name)) { - return filePath; - } else { - int tryAgain = JOptionPane.showConfirmDialog(null, "Database file cannot be added because it does not have the same name as the original.\n" + - "Would you like to try a different database?", "Invalid File", JOptionPane.YES_NO_OPTION); - if (tryAgain == JOptionPane.YES_OPTION) { - return searchForFile(name); - } else { - return null; - } - } - } catch (IOException ex) { - logger.log(Level.WARNING, "Couldn't get selected file path.", ex); - } catch (TskCoreException ex) { - int tryAgain = JOptionPane.showConfirmDialog(null, "Database file you chose cannot be opened.\n" + "If it was just an index, please try to recreate it from the database.\n" + - "Would you like to choose another database?", "Invalid File", JOptionPane.YES_NO_OPTION); - if (tryAgain == JOptionPane.YES_OPTION) { - return searchForFile(name); - } else { - return null; - } - } - } - // Otherwise the user cancelled, so delete the missing entry - return null; - } - - private boolean setsFileExists() { - File f = new File(xmlFile); - return f.exists() && f.canRead() && f.canWrite(); - } - - -} +/* + * Autopsy Forensic Browser + * + * Copyright 2011 Basis Technology Corp. 
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.autopsy.hashdatabase;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Level;
+import javax.swing.JFileChooser;
+import javax.swing.JOptionPane;
+import javax.swing.filechooser.FileNameExtensionFilter;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import org.sleuthkit.autopsy.coreutils.Logger;
+import org.sleuthkit.autopsy.coreutils.PlatformUtil;
+import org.sleuthkit.autopsy.coreutils.XMLUtil;
+import org.sleuthkit.autopsy.hashdatabase.HashDb.DBType;
+import org.sleuthkit.datamodel.SleuthkitJNI;
+import org.sleuthkit.datamodel.TskCoreException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+public class HashDbXML {
+    private static final String ROOT_EL = "hash_sets";
+    private static final String SET_EL = "hash_set";
+    private static final String SET_NAME_ATTR = "name";
+    private static final String SET_TYPE_ATTR = "type";
+    private static final String SET_USE_FOR_INGEST_ATTR = "use_for_ingest";
+    private static final String SET_SHOW_INBOX_MESSAGES = "show_inbox_messages";
+    private static final String PATH_EL = "hash_set_path";
+    private static final String PATH_NUMBER_ATTR = "number";
+    private static final String CUR_HASHSETS_FILE_NAME = "hashsets.xml";
+    private static final String XSDFILE = "HashsetsSchema.xsd";
+    private static final String ENCODING = "UTF-8";
+    private static final String CUR_HASHSET_FILE = PlatformUtil.getUserConfigDirectory() + File.separator + CUR_HASHSETS_FILE_NAME;
+    private static final String SET_CALC = "hash_calculate";
+    private static final String SET_VALUE = "value";
+    private static final Logger logger = Logger.getLogger(HashDbXML.class.getName());
+    private static HashDbXML currentInstance;
+
+    private List<HashDb> knownBadSets;
+    private HashDb nsrlSet;
+    private String xmlFile;
+    private boolean calculate;
+
+    private HashDbXML(String xmlFile) {
+        knownBadSets = new ArrayList<HashDb>();
+        this.xmlFile = xmlFile;
+    }
+
+    /**
+     * Gets the singleton instance for managing the application's hash sets
+     */
+    static synchronized HashDbXML getCurrent() {
+        if (currentInstance == null) {
+            currentInstance = new HashDbXML(CUR_HASHSET_FILE);
+            currentInstance.reload();
+        }
+        return currentInstance;
+    }
+
+    /**
+     * Get the hash sets
+     */
+    public List<HashDb> getAllSets() {
+        List<HashDb> ret = new ArrayList<HashDb>();
+        if (nsrlSet != null) {
+            ret.add(nsrlSet);
+        }
+        ret.addAll(knownBadSets);
+        return ret;
+    }
+
+    /**
+     * Get the known bad sets
+     */
+    public List<HashDb> getKnownBadSets() {
+        return knownBadSets;
+    }
+
+    /**
+     * Get the NSRL set
+     */
+    public HashDb getNSRLSet() {
+        return nsrlSet;
+    }
+
+    /**
+     * Add a known bad hash set
+     */
+    public void addKnownBadSet(HashDb set) {
+        knownBadSets.add(set);
+        //save();
+    }
+
+    /**
+     * Add a known bad hash set at the given index
+     */
+    public void addKnownBadSet(int index, HashDb set) {
+        knownBadSets.add(index, set);
+        //save();
+    }
+
+    /**
+     * Set the NSRL hash set (overrides the old set)
+     */
+    public void setNSRLSet(HashDb set) {
+        this.nsrlSet = set;
+        //save();
+    }
+
+    /**
+     * Remove a known bad hash set
+     */
+    public void removeKnownBadSetAt(int index) {
+        knownBadSets.remove(index);
+        //save();
+    }
+
+    /**
+     * Remove the NSRL database
+     */
+    public void removeNSRLSet() {
+        this.nsrlSet = null;
+        //save();
+    }
+
+    /**
+     * Loads the sets file, or creates a new one if it does not exist
+     */
+    public void reload() {
+        boolean created = false;
+
+        //TODO clearing the list causes a bug: we lose track of the state
+        //whether db is being indexed, we should somehow preserve the state when loading new HashDb objects
+
+        knownBadSets.clear();
+        nsrlSet = null;
+
+        if (!this.setsFileExists()) {
+            //create new if it doesn't exist
+            save();
+            created = true;
+        }
+
+        //load, if fails to load create new; save regardless
+        load();
+        if (!created) {
+            //create new if failed to load
+            save();
+        }
+    }
+
+    /**
+     * Sets the calculate flag.
+     * @param set the new value of the flag
+     */
+    public void setCalculate(boolean set) {
+        this.calculate = set;
+        //save();
+    }
+
+    /**
+     * Gets the calculate flag.
+     * @return the current value of the flag
+     */
+    public boolean getCalculate() {
+        return this.calculate;
+    }
+
+    /**
+     * Writes out the current sets file, replacing the previous one
+     */
+    public boolean save() {
+        boolean success = false;
+
+        DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();
+
+        try {
+            DocumentBuilder docBuilder = dbfac.newDocumentBuilder();
+            Document doc = docBuilder.newDocument();
+
+            Element rootEl = doc.createElement(ROOT_EL);
+            doc.appendChild(rootEl);
+
+            for (HashDb set : knownBadSets) {
+                String useForIngest = Boolean.toString(set.getUseForIngest());
+                String showInboxMessages = Boolean.toString(set.getShowInboxMessages());
+                List<String> paths = set.getDatabasePaths();
+                String type = DBType.KNOWN_BAD.toString();
+
+                Element setEl = doc.createElement(SET_EL);
+                setEl.setAttribute(SET_NAME_ATTR, set.getName());
+                setEl.setAttribute(SET_TYPE_ATTR, type);
+                setEl.setAttribute(SET_USE_FOR_INGEST_ATTR, useForIngest);
+                setEl.setAttribute(SET_SHOW_INBOX_MESSAGES, showInboxMessages);
+
+                for (int i = 0; i < paths.size(); i++) {
+                    String path = paths.get(i);
+                    Element pathEl = doc.createElement(PATH_EL);
+                    pathEl.setAttribute(PATH_NUMBER_ATTR, Integer.toString(i));
+                    pathEl.setTextContent(path);
+                    setEl.appendChild(pathEl);
+                }
+                rootEl.appendChild(setEl);
+            }
+
+            if (nsrlSet != null) {
+                String useForIngest = Boolean.toString(nsrlSet.getUseForIngest());
+                String showInboxMessages = Boolean.toString(nsrlSet.getShowInboxMessages());
+                List<String> paths = nsrlSet.getDatabasePaths();
+                String type = DBType.NSRL.toString();
+
+                Element setEl = doc.createElement(SET_EL);
+                setEl.setAttribute(SET_NAME_ATTR, nsrlSet.getName());
+                setEl.setAttribute(SET_TYPE_ATTR, type);
+                setEl.setAttribute(SET_USE_FOR_INGEST_ATTR, useForIngest);
+                setEl.setAttribute(SET_SHOW_INBOX_MESSAGES, showInboxMessages);
+
+                for (int i = 0; i < paths.size(); i++) {
+                    String path = paths.get(i);
+                    Element pathEl = doc.createElement(PATH_EL);
+                    pathEl.setAttribute(PATH_NUMBER_ATTR, Integer.toString(i));
+                    pathEl.setTextContent(path);
+                    setEl.appendChild(pathEl);
+                }
+                rootEl.appendChild(setEl);
+            }
+
+            String calcValue = Boolean.toString(calculate);
+            Element setCalc = doc.createElement(SET_CALC);
+            setCalc.setAttribute(SET_VALUE, calcValue);
+            rootEl.appendChild(setCalc);
+
+            success = XMLUtil.saveDoc(HashDbXML.class, xmlFile, ENCODING, doc);
+        } catch (ParserConfigurationException e) {
+            logger.log(Level.SEVERE, "Error saving hash sets: can't initialize parser.", e);
+        }
+        return success;
+    }
+
+    /**
+     * Loads and parses the XML sets file, then disposes of the document
+     */
+    public boolean load() {
+        final Document doc = XMLUtil.loadDoc(HashDbXML.class, xmlFile, XSDFILE);
+        if (doc == null) {
+            return false;
+        }
+
+        Element root = doc.getDocumentElement();
+        if (root == null) {
+            logger.log(Level.SEVERE, "Error loading hash sets: invalid file format.");
+            return false;
+        }
+        NodeList setsNList = root.getElementsByTagName(SET_EL);
+        int numSets = setsNList.getLength();
+        if (numSets == 0) {
+            logger.log(Level.WARNING, "No element hash_set exists.");
+        }
+        for (int i = 0; i < numSets; ++i) {
+            Element setEl = (Element) setsNList.item(i);
+            final String name = setEl.getAttribute(SET_NAME_ATTR);
+            final String type = setEl.getAttribute(SET_TYPE_ATTR);
+            final String useForIngest = setEl.getAttribute(SET_USE_FOR_INGEST_ATTR);
+            final String showInboxMessages = setEl.getAttribute(SET_SHOW_INBOX_MESSAGES);
+            Boolean useForIngestBool = Boolean.parseBoolean(useForIngest);
+            Boolean showInboxMessagesBool = Boolean.parseBoolean(showInboxMessages);
+            List<String> paths = new ArrayList<String>();
+
+            // Parse all paths
+            NodeList pathsNList = setEl.getElementsByTagName(PATH_EL);
+            final int numPaths = pathsNList.getLength();
+            for (int j = 0; j < numPaths; ++j) {
+                Element pathEl = (Element) pathsNList.item(j);
+                String number = pathEl.getAttribute(PATH_NUMBER_ATTR);
+                String path = pathEl.getTextContent();
+
+                // Keep the path if either the database or its index exists
+                File database = new File(path);
+                File index = new File(HashDb.toIndexPath(path));
+                if (database.exists() || index.exists()) {
+                    paths.add(path);
+                } else {
+                    // Ask for new path
+                    int ret = JOptionPane.showConfirmDialog(null, "Database " + name + " could not be found at location\n"
+                            + path + "\n"
+                            + "Would you like to search for the file?", "Missing Database", JOptionPane.YES_NO_OPTION);
+                    if (ret == JOptionPane.YES_OPTION) {
+                        String filePath = searchForFile(name);
+                        if (filePath != null) {
+                            paths.add(filePath);
+                        }
+                    }
+                }
+            }
+
+            // Check everything was properly set
+            if (name.isEmpty()) {
+                logger.log(Level.WARNING, "Name was not set for hash_set at index {0}.", i);
+            }
+            if (type.isEmpty()) {
+                logger.log(Level.SEVERE, "Type was not set for hash_set at index {0}, cannot make instance of HashDb class.", i);
+                return false; // exit because this causes a fatal error
+            }
+            if (useForIngest.isEmpty()) {
+                logger.log(Level.WARNING, "UseForIngest was not set for hash_set at index {0}.", i);
+            }
+            if (showInboxMessages.isEmpty()) {
+                logger.log(Level.WARNING, "ShowInboxMessages was not set for hash_set at index {0}.", i);
+            }
+
+            if (paths.isEmpty()) {
+                // No usable paths for this entry; the user most likely declined to search for the missing file
+                logger.log(Level.WARNING, "No paths were set for hash_set at index {0}. Removing the database.", i);
+            } else {
+                DBType typeDBType = DBType.valueOf(type);
+                HashDb set = new HashDb(name, paths, useForIngestBool, showInboxMessagesBool, typeDBType);
+
+                if (typeDBType == DBType.KNOWN_BAD) {
+                    knownBadSets.add(set);
+                } else if (typeDBType == DBType.NSRL) {
+                    this.nsrlSet = set;
+                }
+            }
+        }
+
+        NodeList calcList = root.getElementsByTagName(SET_CALC);
+        int numCalc = calcList.getLength(); // Shouldn't be more than 1
+        if (numCalc == 0) {
+            logger.log(Level.WARNING, "No element hash_calculate exists.");
+        }
+        for (int i = 0; i < numCalc; i++) {
+            Element calcEl = (Element) calcList.item(i);
+            final String value = calcEl.getAttribute(SET_VALUE);
+            calculate = Boolean.parseBoolean(value);
+        }
+        return true;
+    }
+
+    /**
+     * Ask the user to browse to a new Hash Database file with the same database
+     * name as the one provided. If the names do not match, the database cannot
+     * be added. If the user cancels the search, return null, meaning the user
+     * would like to remove the entry for the missing database.
+     *
+     * @param name the name of the database to add
+     * @return the file path to the new database, or null if the user wants to
+     * delete the old database
+     */
+    private String searchForFile(String name) {
+        // Initialize the file chooser and only allow hash databases to be opened
+        JFileChooser fc = new JFileChooser();
+        fc.setDragEnabled(false);
+        fc.setFileSelectionMode(JFileChooser.FILES_ONLY);
+        String[] EXTENSION = new String[] { "txt", "idx", "hash", "Hash" };
+        FileNameExtensionFilter filter = new FileNameExtensionFilter(
+                "Hash Database File", EXTENSION);
+        fc.setFileFilter(filter);
+        fc.setMultiSelectionEnabled(false);
+
+        int retval = fc.showOpenDialog(null);
+        // If the user selects an appropriate file
+        if (retval == JFileChooser.APPROVE_OPTION) {
+            File f = fc.getSelectedFile();
+            try {
+                String filePath = f.getCanonicalPath();
+                if (HashDb.isIndexPath(filePath)) {
+                    filePath = HashDb.toDatabasePath(filePath);
+                }
+                String derivedName = SleuthkitJNI.getDatabaseName(filePath);
+                // If the database has the same name as before, return it
+                if (derivedName.equals(name)) {
+                    return filePath;
+                } else {
+                    int tryAgain = JOptionPane.showConfirmDialog(null, "Database file cannot be added because it does not have the same name as the original.\n" +
+                            "Would you like to try a different database?", "Invalid File", JOptionPane.YES_NO_OPTION);
+                    if (tryAgain == JOptionPane.YES_OPTION) {
+                        return searchForFile(name);
+                    } else {
+                        return null;
+                    }
+                }
+            } catch (IOException ex) {
+                logger.log(Level.WARNING, "Couldn't get selected file path.", ex);
+            } catch (TskCoreException ex) {
+                int tryAgain = JOptionPane.showConfirmDialog(null, "The database file you chose cannot be opened.\n" + "If it was just an index, please try to recreate it from the database.\n" +
+                        "Would you like to choose another database?", "Invalid File", JOptionPane.YES_NO_OPTION);
+                if (tryAgain == JOptionPane.YES_OPTION) {
+                    return searchForFile(name);
+                } else {
+                    return null;
+                }
+            }
+        }
+        // Otherwise the user cancelled, so delete the missing entry
+        return null;
+    }
+
+    private boolean setsFileExists() {
+        File f = new File(xmlFile);
+        return f.exists() && f.canRead() && f.canWrite();
+    }
+
+
+}
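
A note on the classes above: HashDb.status() reduces to a small decision table over the indexing flag, indexExists(), databaseExists(), and isOutdated(), and toIndexPath()/toDatabasePath() are a plain suffix round-trip. The following self-contained Java sketch restates both for reference. It is an illustration only, not code from this patch; in particular, the INDEX_SUFFIX value shown here is an assumption (the real constant is defined elsewhere in HashDb and may differ).

import java.io.File;

/** Illustrative sketch of the HashDb.status() decision table; not part of the patch. */
public class HashDbStatusSketch {

    /** Mirrors the IndexStatus values that HashDb.status() returns. */
    enum IndexStatus { INDEXING, INDEX_CURRENT, INDEX_OUTDATED, NO_DB, NO_INDEX, NONE }

    // Assumed suffix; the actual constant lives in HashDb.
    static final String INDEX_SUFFIX = "-md5.idx";

    /** Same branch structure as HashDb.status(), with the inputs made explicit. */
    static IndexStatus status(boolean indexing, boolean indexExists, boolean dbExists, boolean outdated) {
        if (indexing) {
            return IndexStatus.INDEXING;           // an indexing task is in progress
        }
        if (indexExists) {
            return dbExists
                    ? (outdated ? IndexStatus.INDEX_OUTDATED : IndexStatus.INDEX_CURRENT)
                    : IndexStatus.NO_DB;           // index present, database file missing
        }
        return dbExists ? IndexStatus.NO_INDEX : IndexStatus.NONE;
    }

    public static void main(String[] args) {
        // toIndexPath()/toDatabasePath() are a simple suffix round-trip:
        String databasePath = "C:\\hashsets\\NSRLFile.txt";
        String indexPath = databasePath.concat(INDEX_SUFFIX);
        String recovered = indexPath.substring(0, indexPath.lastIndexOf(INDEX_SUFFIX));
        System.out.println(indexPath + " -> " + recovered);

        System.out.println(status(false, true, true, true));   // INDEX_OUTDATED
        System.out.println(status(false, true, false, false)); // NO_DB
        System.out.println(status(false, false, true, false)); // NO_INDEX
    }
}
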
diff --git a/KeywordSearch/manifest.mf b/KeywordSearch/manifest.mf
index dd9e48a200c93afddfcdc3347cd173893b1bfede..e3096520251b08753c5e1778e2b60efcedb5c0be 100644
--- a/KeywordSearch/manifest.mf
+++ b/KeywordSearch/manifest.mf
@@ -1,9 +1,9 @@
-Manifest-Version: 1.0
-AutoUpdate-Show-In-Client: true
-OpenIDE-Module: org.sleuthkit.autopsy.keywordsearch/5
-OpenIDE-Module-Implementation-Version: 9
-OpenIDE-Module-Install: org/sleuthkit/autopsy/keywordsearch/Installer.class
-OpenIDE-Module-Layer: org/sleuthkit/autopsy/keywordsearch/layer.xml
-OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/keywordsearch/Bundle.properties
-OpenIDE-Module-Requires: org.openide.windows.WindowManager
-
+Manifest-Version: 1.0
+AutoUpdate-Show-In-Client: true
+OpenIDE-Module: org.sleuthkit.autopsy.keywordsearch/5
+OpenIDE-Module-Implementation-Version: 9
+OpenIDE-Module-Install: org/sleuthkit/autopsy/keywordsearch/Installer.class
+OpenIDE-Module-Layer: org/sleuthkit/autopsy/keywordsearch/layer.xml
+OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/keywordsearch/Bundle.properties
+OpenIDE-Module-Requires: org.openide.windows.WindowManager
+
diff --git a/KeywordSearch/nbproject/project.properties b/KeywordSearch/nbproject/project.properties
index 140caac79cb84fcceb74da9685265b40bc26f5b4..4f3228693fea5989f8a4f47d9810719c11eb5746 100644
--- a/KeywordSearch/nbproject/project.properties
+++ b/KeywordSearch/nbproject/project.properties
@@ -1,6 +1,6 @@
-javac.source=1.7
-javac.compilerargs=-Xlint -Xlint:-serial
-license.file=../LICENSE-2.0.txt
-nbm.homepage=http://www.sleuthkit.org/autopsy/
-nbm.needs.restart=true
-spec.version.base=3.2
+javac.source=1.7
+javac.compilerargs=-Xlint -Xlint:-serial
+license.file=../LICENSE-2.0.txt
+nbm.homepage=http://www.sleuthkit.org/autopsy/
+nbm.needs.restart=true
+spec.version.base=3.2
diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
index 65dbef957cf2aaecf5471764e771da5675050879..2d260d1242ae993c9343bf88f32dccc522ddf33f 100644
--- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
+++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/Bundle.properties
@@ -1,91 +1,91 @@
-OpenIDE-Module-Display-Category=Ingest Module
-OpenIDE-Module-Long-Description=\
-    Keyword Search ingest module.\n\n\
-    The module indexes files found in the disk image at ingest time. \
-    It then periodically runs the search on the indexed files using one or more keyword lists (containing pure words and/or regular expressions) and posts results.\n\n\
-    The module also contains additional tools integrated in the main GUI, such as keyword list configuration, keyword seach bar in the top-right corner, extracted text viewer and search results viewer showing highlighted keywords found.
-OpenIDE-Module-Name=KeywordSearch -ListBundleName=Keyword Lists -ListBundleConfig=Keyword List Configuration -IndexProgressPanel.statusText.text=Status text -IndexProgressPanel.cancelButton.text=Cancel -ExtractedContentPanel.hitLabel.text=Matches on page: -ExtractedContentPanel.hitCountLabel.text=- -ExtractedContentPanel.hitOfLabel.text=of -ExtractedContentPanel.hitTotalLabel.text=- -ExtractedContentPanel.hitButtonsLabel.text=Match -ExtractedContentPanel.hitPreviousButton.text= -ExtractedContentPanel.hitNextButton.text= -ExtractedContentPanel.copyMenuItem.text=Copy -ExtractedContentPanel.selectAllMenuItem.text=Select All -KeywordSearchEditListPanel.saveListButton.text=Copy List -KeywordSearchEditListPanel.addWordField.text= -KeywordSearchEditListPanel.addWordButton.text=Add -KeywordSearchEditListPanel.chRegex.text=Regular Expression -KeywordSearchEditListPanel.deleteWordButton.text=Remove Selected -KeywordSearchEditListPanel.cutMenuItem.text=Cut -KeywordSearchEditListPanel.selectAllMenuItem.text=Select All -KeywordSearchEditListPanel.pasteMenuItem.text=Paste -KeywordSearchEditListPanel.copyMenuItem.text=Copy -KeywordSearchEditListPanel.exportButton.text=Export List -KeywordSearchEditListPanel.deleteListButton.text=Delete List -KeywordSearchListsManagementPanel.newListButton.text=New List -KeywordSearchEditListPanel.useForIngestCheckbox.text=Enable for ingest -KeywordSearchListsManagementPanel.importButton.text=Import List -KeywordSearchPanel.searchBox.text=Search... -KeywordSearchPanel.regExCheckboxMenuItem.text=Use Regular Expressions -KeywordSearchPanel.settingsLabel.text= -KeywordSearchListsViewerPanel.searchAddButton.text=Search -KeywordSearchListsViewerPanel.manageListsButton.text=Manage Lists -KeywordSearchListsViewerPanel.ingestIndexLabel.text=Files Indexed: -KeywordSearchEditListPanel.selectorsCombo.toolTipText=Regular Expression selector type (optional) -KeywordSearchPanel.searchButton.text= -KeywordSearchPanel.cutMenuItem.text=Cut -KeywordSearchPanel.copyMenuItem.text=Copy -KeywordSearchPanel.pasteMenuItem.text=Paste -KeywordSearchPanel.selectAllMenuItem.text=Select All -ExtractedContentPanel.pageButtonsLabel.text=Page -ExtractedContentPanel.pageNextButton.text= -ExtractedContentPanel.pagePreviousButton.actionCommand=pagePreviousButton -ExtractedContentPanel.pagePreviousButton.text= -ExtractedContentPanel.pagesLabel.text=Page: -ExtractedContentPanel.pageOfLabel.text=of -ExtractedContentPanel.pageCurLabel.text=- -ExtractedContentPanel.pageTotalLabel.text=- -ExtractedContentPanel.hitLabel.toolTipText= -KeywordSearchEditListPanel.ingestMessagesCheckbox.text=Enable sending messages to inbox during ingest -KeywordSearchEditListPanel.ingestMessagesCheckbox.toolTipText=Send messages during triage / ingest when hits on keyword from this list occur -KeywordSearchConfigurationPanel2.skipNSRLCheckBox.text=Do not add files in NSRL (known files) to keyword index during ingest -KeywordSearchConfigurationPanel2.skipNSRLCheckBox.toolTipText=Requires Hash DB service to had run previously, or be selected for next ingest. -KeywordSearchConfigurationPanel2.filesIndexedValue.text=- -KeywordSearchConfigurationPanel2.filesIndexedLabel.text=Files in keyword index: -KeywordSearchIngestSimplePanel.languagesLabel.text=Scripts enabled for string extraction from unknown file types: -KeywordSearchIngestSimplePanel.languagesValLabel.text=- -KeywordSearchIngestSimplePanel.languagesLabel.toolTipText=Scripts enabled for string extraction from unknown file types. Changes can be done in Advanced Settings. 
-KeywordSearchIngestSimplePanel.languagesValLabel.toolTipText=
-KeywordSearchConfigurationPanel3.languagesLabel.text=Enabled scripts (languages):
-KeywordSearchConfigurationPanel2.chunksLabel.text=Chunks in keyword index:
-KeywordSearchConfigurationPanel2.chunksValLabel.text=-
-KeywordSearchConfigurationPanel3.enableUTF8Checkbox.text=Enable UTF8 text extraction
-KeywordSearchConfigurationPanel3.enableUTF16Checkbox.text=Enable UTF16LE and UTF16BE string extraction
-KeywordSearchEditListPanel.keywordOptionsLabel.text=Keyword Options
-KeywordSearchEditListPanel.listOptionsLabel.text=List Options
-KeywordSearchConfigurationPanel3.ingestSettingsLabel.text=Ingest settings for string extraction from unknown file types (changes effective on next ingest):
-KeywordSearchConfigurationPanel2.settingsLabel.text=Settings
-KeywordSearchConfigurationPanel2.informationLabel.text=Information
-KeywordSearchListsManagementPanel.keywordListsLabel.text=Keyword Lists:
-KeywordSearchEditListPanel.keywordsLabel.text=Keywords:
-KeywordSearchConfigurationPanel2.timeRadioButton1.toolTipText=20 mins. (fastest ingest time)
-KeywordSearchConfigurationPanel2.timeRadioButton1.text=20 minutes (slowest feedback, fastest ingest)
-KeywordSearchConfigurationPanel2.timeRadioButton2.toolTipText=10 minutes (faster overall ingest time than default)
-KeywordSearchConfigurationPanel2.timeRadioButton2.text=10 minutes (slower feedback, faster ingest)
-KeywordSearchConfigurationPanel2.timeRadioButton3.toolTipText=5 minutes (overall ingest time will be longer)
-KeywordSearchConfigurationPanel2.timeRadioButton3.text=5 minutes (default)
-KeywordSearchIngestSimplePanel.encodingsLabel.text=Encodings:
-KeywordSearchIngestSimplePanel.keywordSearchEncodings.text=-
-KeywordSearchIngestSimplePanel.titleLabel.text=Select keyword lists to enable during ingest:
-OpenIDE-Module-Short-Description=Keyword Search ingest module, extracted text viewer and keyword search tools
-KeywordSearchListsViewerPanel.manageListsButton.toolTipText=Manage keyword lists, their settings and associated keywords. The settings are shared among all cases.
-KeywordSearchConfigurationPanel2.frequencyLabel.text=Results update frequency during ingest:
-KeywordSearchConfigurationPanel2.timeRadioButton4.text_1=1 minute (faster feedback, longest ingest)
-KeywordSearchConfigurationPanel2.timeRadioButton4.toolTipText=1 minute (overall ingest time will be longest)
+OpenIDE-Module-Display-Category=Ingest Module
+OpenIDE-Module-Long-Description=\
+    Keyword Search ingest module.\n\n\
+    The module indexes files found in the disk image at ingest time. \
+    It then periodically runs the search on the indexed files using one or more keyword lists (containing pure words and/or regular expressions) and posts results.\n\n\
+    The module also contains additional tools integrated in the main GUI, such as keyword list configuration, keyword search bar in the top-right corner, extracted text viewer and search results viewer showing highlighted keywords found.
+OpenIDE-Module-Name=KeywordSearch
+ListBundleName=Keyword Lists
+ListBundleConfig=Keyword List Configuration
+IndexProgressPanel.statusText.text=Status text
+IndexProgressPanel.cancelButton.text=Cancel
+ExtractedContentPanel.hitLabel.text=Matches on page:
+ExtractedContentPanel.hitCountLabel.text=-
+ExtractedContentPanel.hitOfLabel.text=of
+ExtractedContentPanel.hitTotalLabel.text=-
+ExtractedContentPanel.hitButtonsLabel.text=Match
+ExtractedContentPanel.hitPreviousButton.text=
+ExtractedContentPanel.hitNextButton.text=
+ExtractedContentPanel.copyMenuItem.text=Copy
+ExtractedContentPanel.selectAllMenuItem.text=Select All
+KeywordSearchEditListPanel.saveListButton.text=Copy List
+KeywordSearchEditListPanel.addWordField.text=
+KeywordSearchEditListPanel.addWordButton.text=Add
+KeywordSearchEditListPanel.chRegex.text=Regular Expression
+KeywordSearchEditListPanel.deleteWordButton.text=Remove Selected
+KeywordSearchEditListPanel.cutMenuItem.text=Cut
+KeywordSearchEditListPanel.selectAllMenuItem.text=Select All
+KeywordSearchEditListPanel.pasteMenuItem.text=Paste
+KeywordSearchEditListPanel.copyMenuItem.text=Copy
+KeywordSearchEditListPanel.exportButton.text=Export List
+KeywordSearchEditListPanel.deleteListButton.text=Delete List
+KeywordSearchListsManagementPanel.newListButton.text=New List
+KeywordSearchEditListPanel.useForIngestCheckbox.text=Enable for ingest
+KeywordSearchListsManagementPanel.importButton.text=Import List
+KeywordSearchPanel.searchBox.text=Search...
+KeywordSearchPanel.regExCheckboxMenuItem.text=Use Regular Expressions
+KeywordSearchPanel.settingsLabel.text=
+KeywordSearchListsViewerPanel.searchAddButton.text=Search
+KeywordSearchListsViewerPanel.manageListsButton.text=Manage Lists
+KeywordSearchListsViewerPanel.ingestIndexLabel.text=Files Indexed:
+KeywordSearchEditListPanel.selectorsCombo.toolTipText=Regular Expression selector type (optional)
+KeywordSearchPanel.searchButton.text=
+KeywordSearchPanel.cutMenuItem.text=Cut
+KeywordSearchPanel.copyMenuItem.text=Copy
+KeywordSearchPanel.pasteMenuItem.text=Paste
+KeywordSearchPanel.selectAllMenuItem.text=Select All
+ExtractedContentPanel.pageButtonsLabel.text=Page
+ExtractedContentPanel.pageNextButton.text=
+ExtractedContentPanel.pagePreviousButton.actionCommand=pagePreviousButton
+ExtractedContentPanel.pagePreviousButton.text=
+ExtractedContentPanel.pagesLabel.text=Page:
+ExtractedContentPanel.pageOfLabel.text=of
+ExtractedContentPanel.pageCurLabel.text=-
+ExtractedContentPanel.pageTotalLabel.text=-
+ExtractedContentPanel.hitLabel.toolTipText=
+KeywordSearchEditListPanel.ingestMessagesCheckbox.text=Enable sending messages to inbox during ingest
+KeywordSearchEditListPanel.ingestMessagesCheckbox.toolTipText=Send messages during triage / ingest when hits on keywords from this list occur
+KeywordSearchConfigurationPanel2.skipNSRLCheckBox.text=Do not add files in NSRL (known files) to keyword index during ingest
+KeywordSearchConfigurationPanel2.skipNSRLCheckBox.toolTipText=Requires the Hash DB service to have run previously, or to be selected for the next ingest.
+KeywordSearchConfigurationPanel2.filesIndexedValue.text=-
+KeywordSearchConfigurationPanel2.filesIndexedLabel.text=Files in keyword index:
+KeywordSearchIngestSimplePanel.languagesLabel.text=Scripts enabled for string extraction from unknown file types:
+KeywordSearchIngestSimplePanel.languagesValLabel.text=-
+KeywordSearchIngestSimplePanel.languagesLabel.toolTipText=Scripts enabled for string extraction from unknown file types. Changes can be made in Advanced Settings.
+KeywordSearchIngestSimplePanel.languagesValLabel.toolTipText= +KeywordSearchConfigurationPanel3.languagesLabel.text=Enabled scripts (languages): +KeywordSearchConfigurationPanel2.chunksLabel.text=Chunks in keyword index: +KeywordSearchConfigurationPanel2.chunksValLabel.text=- +KeywordSearchConfigurationPanel3.enableUTF8Checkbox.text=Enable UTF8 text extraction +KeywordSearchConfigurationPanel3.enableUTF16Checkbox.text=Enable UTF16LE and UTF16BE string extraction +KeywordSearchEditListPanel.keywordOptionsLabel.text=Keyword Options +KeywordSearchEditListPanel.listOptionsLabel.text=List Options +KeywordSearchConfigurationPanel3.ingestSettingsLabel.text=Ingest settings for string extraction from unknown file types (changes effective on next ingest): +KeywordSearchConfigurationPanel2.settingsLabel.text=Settings +KeywordSearchConfigurationPanel2.informationLabel.text=Information +KeywordSearchListsManagementPanel.keywordListsLabel.text=Keyword Lists: +KeywordSearchEditListPanel.keywordsLabel.text=Keywords: +KeywordSearchConfigurationPanel2.timeRadioButton1.toolTipText=20 mins. (fastest ingest time) +KeywordSearchConfigurationPanel2.timeRadioButton1.text=20 minutes (slowest feedback, fastest ingest) +KeywordSearchConfigurationPanel2.timeRadioButton2.toolTipText=10 minutes (faster overall ingest time than default) +KeywordSearchConfigurationPanel2.timeRadioButton2.text=10 minutes (slower feedback, faster ingest) +KeywordSearchConfigurationPanel2.timeRadioButton3.toolTipText=5 minutes (overall ingest time will be longer) +KeywordSearchConfigurationPanel2.timeRadioButton3.text=5 minutes (default) +KeywordSearchIngestSimplePanel.encodingsLabel.text=Encodings: +KeywordSearchIngestSimplePanel.keywordSearchEncodings.text=- +KeywordSearchIngestSimplePanel.titleLabel.text=Select keyword lists to enable during ingest: +OpenIDE-Module-Short-Description=Keyword Search ingest module, extracted text viewer and keyword search tools +KeywordSearchListsViewerPanel.manageListsButton.toolTipText=Manage keyword lists, their settings and associated keywords. The settings are shared among all cases. +KeywordSearchConfigurationPanel2.frequencyLabel.text=Results update frequency during ingest: +KeywordSearchConfigurationPanel2.timeRadioButton4.text_1=1 minute (faster feedback, longest ingest) +KeywordSearchConfigurationPanel2.timeRadioButton4.toolTipText=1 minute (overall ingest time will be longest) diff --git a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-about.html b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-about.html index ec2a45f0566a6b8f1d26bc814e82ce1c99f90d8c..b4290c98deadaeccff128eee794b2c19f5d9fb3d 100644 --- a/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-about.html +++ b/KeywordSearch/src/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-about.html @@ -1,81 +1,81 @@ -<!-- -Autopsy Forensic Browser - -Copyright 2011 Basis Technology Corp. -Contact: carrier <at> sleuthkit <dot> org - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
---> -<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> -<html> - <head> - <title>Keyword Search</title> - <link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css"> - <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> - </head> - <body> - <h2>Keyword Search</h2> - <p> - Autopsy ships a keyword search module, which provides the <a href="nbdocs:/org/sleuthkit/autopsy/ingest/docs/ingest-about.html">ingest capability</a> - and also supports a manual text search mode. - </p> - <p>The keyword search ingest module extracts text from the files on the image being ingested and adds them to the index that can then be searched.</p> - <p> - Autopsy tries its best to extract maximum amount of text from the files being indexed. - First, the indexing will try to extract text from supported file formats, such as pure text file format, MS Office Documents, PDF files, Email files, and many others. - If the file is not supported by the standard text extractor, Autopsy will fallback to string extraction algorithm. - String extraction on unknown file formats or arbitrary binary files can often still extract a good amount of text from the file, often good enough to provide additional clues. - However, string extraction will not be able to extract text strings from binary files that have been encrypted. - </p> - <p> - Autopsy ships with some built-in lists that define regular expressions and enable user to search for Phone Numbers, IP addresses, URLs and E-mail addresses. - However, enabling some of these very general lists can produce a very large number of hits, many of them can be false-positives. - </p> - <p> - Once files are in the index, they can be searched quickly for specific keywords, regular expressions, - or using keyword search lists that can contain a mixture of keywords and regular expressions. - Search queries can be executed automatically by the ingest during the ingest run, or at the end of the ingest, depending on the current settings and the time it takes to ingest the image. - </p> - <p>Search queries can also be executed manually by the user at any time, as long as there are some files already indexed and ready to be searched.</p> - <p> - Keyword search module will save the search results regardless whether the search is performed by the ingest process, or manually by the user. - The saved results are available in the Directory Tree in the left hand side panel. - </p> - <p> - To see keyword search results in real-time while ingest is running, add keyword lists using the - <a href="nbdocs:/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-configuration.html">Keyword Search Configuration Dialog</a> - and select the "Use during ingest" check box. - You can select "Enable sending messages to inbox during ingest" per list, if the hits on that list should be reported in the Inbox, which is recommended for very specific searches. - </p> - <p> - See <a href="nbdocs:/org/sleuthkit/autopsy/ingest/docs/ingest-about.html">(Ingest)</a> - for more information on ingest in general. - </p> - <p> - Once there are files in the index, the <a href="nbdocs:/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-bar.html">Keyword Search Bar</a> - will be available for use to manually search at any time. 
-    </p>
-  </body>
-</html>
-<!--
-    Tip: to create a link which will open in an external web browser, try:
-    <object classid="java:org.netbeans.modules.javahelp.BrowserDisplayer">
-        <param name="content" value="http://www.netbeans.org/">
-        <param name="text" value="<html><u>http://www.netbeans.org/</u></html>">
-        <param name="textFontSize" value="medium">
-        <param name="textColor" value="blue">
-    </object>
-    To create a link to a help set from another module, you need to know the code name base and path, e.g.:
-    <a href="nbdocs://org.netbeans.modules.usersguide/org/netbeans/modules/usersguide/configure/configure_options.html">Using the Options Window</a>
-    (This link will behave sanely if that module is disabled or missing.)
--->
+<!--
+Autopsy Forensic Browser
+
+Copyright 2011 Basis Technology Corp.
+Contact: carrier <at> sleuthkit <dot> org
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+  <head>
+    <title>Keyword Search</title>
+    <link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css">
+    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
+  </head>
+  <body>
+    <h2>Keyword Search</h2>
+    <p>
+      Autopsy ships with a keyword search module, which provides the <a href="nbdocs:/org/sleuthkit/autopsy/ingest/docs/ingest-about.html">ingest capability</a>
+      and also supports a manual text search mode.
+    </p>
+    <p>The keyword search ingest module extracts text from the files on the image being ingested and adds it to an index that can then be searched.</p>
+    <p>
+      Autopsy tries its best to extract the maximum amount of text from the files being indexed.
+      First, the indexing will try to extract text from supported file formats, such as plain text, MS Office documents, PDF files, email files, and many others.
+      If the file is not supported by the standard text extractor, Autopsy will fall back to a string extraction algorithm.
+      String extraction on unknown file formats or arbitrary binary files can often still recover a good amount of text, often enough to provide additional clues.
+      However, string extraction will not be able to extract text strings from binary files that have been encrypted.
+    </p>
+    <p>
+      Autopsy ships with some built-in lists that define regular expressions and enable users to search for Phone Numbers, IP addresses, URLs and E-mail addresses.
+      However, enabling some of these very general lists can produce a very large number of hits, many of which may be false positives.
+    </p>
+    <p>
+      Once files are in the index, they can be searched quickly for specific keywords, regular expressions,
+      or using keyword search lists that can contain a mixture of keywords and regular expressions.
+      Search queries can be executed automatically by the ingest process during the run, or at the end of the ingest, depending on the current settings and the time it takes to ingest the image.
+    </p>
+    <p>Search queries can also be executed manually by the user at any time, as long as there are some files already indexed and ready to be searched.</p>
+    <p>
+      The keyword search module will save the search results regardless of whether the search is performed by the ingest process or manually by the user.
+      The saved results are available in the Directory Tree in the left-hand panel.
+    </p>
+    <p>
+      To see keyword search results in real-time while ingest is running, add keyword lists using the
+      <a href="nbdocs:/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-configuration.html">Keyword Search Configuration Dialog</a>
+      and select the "Use during ingest" check box.
+      You can select "Enable sending messages to inbox during ingest" for each list whose hits should be reported in the Inbox; this is recommended for very specific searches.
+    </p>
+    <p>
+      See <a href="nbdocs:/org/sleuthkit/autopsy/ingest/docs/ingest-about.html">(Ingest)</a>
+      for more information on ingest in general.
+    </p>
+    <p>
+      Once there are files in the index, the <a href="nbdocs:/org/sleuthkit/autopsy/keywordsearch/docs/keywordsearch-bar.html">Keyword Search Bar</a>
+      will be available for manual searches at any time.
+    </p>
+  </body>
+</html>
+<!--
+    Tip: to create a link which will open in an external web browser, try:
+    <object classid="java:org.netbeans.modules.javahelp.BrowserDisplayer">
+        <param name="content" value="http://www.netbeans.org/">
+        <param name="text" value="<html><u>http://www.netbeans.org/</u></html>">
+        <param name="textFontSize" value="medium">
+        <param name="textColor" value="blue">
+    </object>
+    To create a link to a help set from another module, you need to know the code name base and path, e.g.:
+    <a href="nbdocs://org.netbeans.modules.usersguide/org/netbeans/modules/usersguide/configure/configure_options.html">Using the Options Window</a>
+    (This link will behave sanely if that module is disabled or missing.)
+-->
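
The about page above describes built-in keyword lists that are regular expressions for phone numbers, IP addresses, URLs, and e-mail addresses. As a rough, self-contained illustration of how such list-based matching behaves against extracted text, consider the sketch below; the patterns are invented for the example and are not the module's actual shipped expressions.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Illustrative regex keyword lists of the kind the about page describes; not part of the patch. */
public class KeywordListSketch {

    public static void main(String[] args) {
        // Hypothetical patterns; Autopsy's built-in lists may use different expressions.
        Map<String, Pattern> lists = new LinkedHashMap<String, Pattern>();
        lists.put("Email Addresses", Pattern.compile("[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}"));
        lists.put("IP Addresses", Pattern.compile("\\b(?:\\d{1,3}\\.){3}\\d{1,3}\\b"));
        lists.put("Phone Numbers", Pattern.compile("\\b\\d{3}[-.]\\d{3}[-.]\\d{4}\\b"));

        // Stand-in for text produced by the extraction step during ingest.
        String extractedText = "Mail carrier@sleuthkit.org, call 555-867-5309, host 10.0.0.7";

        for (Map.Entry<String, Pattern> list : lists.entrySet()) {
            Matcher m = list.getValue().matcher(extractedText);
            while (m.find()) {
                System.out.println(list.getKey() + " hit: " + m.group());
            }
        }
    }
}
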
diff --git a/RecentActivity/manifest.mf b/RecentActivity/manifest.mf
index 14b58804befdbd5c99206dc3af16ef9dd87225c9..b6f0a41ec0e1cb14d7393f65f29fc0b906a1741f 100644
--- a/RecentActivity/manifest.mf
+++ b/RecentActivity/manifest.mf
@@ -1,10 +1,10 @@
-Manifest-Version: 1.0
-OpenIDE-Module: org.sleuthkit.autopsy.recentactivity/5
-OpenIDE-Module-Implementation-Version: 9
-OpenIDE-Module-Layer: org/sleuthkit/autopsy/recentactivity/layer.xml
-OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/recentactivity/Bundle.properties
-OpenIDE-Module-Requires:
-  org.openide.modules.InstalledFileLocator,
-  org.openide.windows.TopComponent$Registry,
-  org.openide.windows.WindowManager
-
+Manifest-Version: 1.0
+OpenIDE-Module: org.sleuthkit.autopsy.recentactivity/5
+OpenIDE-Module-Implementation-Version: 9
+OpenIDE-Module-Layer: org/sleuthkit/autopsy/recentactivity/layer.xml
+OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/recentactivity/Bundle.properties
+OpenIDE-Module-Requires:
+  org.openide.modules.InstalledFileLocator,
+  org.openide.windows.TopComponent$Registry,
+  org.openide.windows.WindowManager
+
diff --git a/RecentActivity/nbproject/project.properties b/RecentActivity/nbproject/project.properties
index 4ce77193f79dd134164547d3f1ac621b0dbf898f..2cb871f4159ba1dba9029aed4bcf12d96c62396c 100644
--- a/RecentActivity/nbproject/project.properties
+++ b/RecentActivity/nbproject/project.properties
@@ -1,7 +1,7 @@
-file.reference.gson-2.1.jar=release/modules/ext/gson-2.1.jar
-javac.source=1.7
-javac.compilerargs=-Xlint -Xlint:-serial
-license.file=../LICENSE-2.0.txt
-nbm.homepage=http://www.sleuthkit.org/autopsy/
-nbm.needs.restart=true
-spec.version.base=3.0
+file.reference.gson-2.1.jar=release/modules/ext/gson-2.1.jar
+javac.source=1.7
+javac.compilerargs=-Xlint -Xlint:-serial
+license.file=../LICENSE-2.0.txt
+nbm.homepage=http://www.sleuthkit.org/autopsy/
+nbm.needs.restart=true
+spec.version.base=3.0
diff --git a/SevenZip/manifest.mf b/SevenZip/manifest.mf
index 9989549bec4dbd8fb72e58e0ecdb17d0b8df28fb..ca53e48be81ffe55329ad90336a966ba6953726d 100644
--- a/SevenZip/manifest.mf
+++ b/SevenZip/manifest.mf
@@ -1,6 +1,6 @@
-Manifest-Version: 1.0
-OpenIDE-Module: org.sleuthkit.autopsy.sevenzip/1
-OpenIDE-Module-Implementation-Version: 3
-OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/sevenzip/Bundle.properties
-
-
+Manifest-Version: 1.0
+OpenIDE-Module: org.sleuthkit.autopsy.sevenzip/1
+OpenIDE-Module-Implementation-Version: 3
+OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/sevenzip/Bundle.properties
+
+
diff --git a/Testing/manifest.mf b/Testing/manifest.mf
index 53c457afbb1caa3ba5fb890c5ffb99ed99250e70..381f4bb1334cde1573c230656c4c03a72c1755b8 100644
--- a/Testing/manifest.mf
+++ b/Testing/manifest.mf
@@ -1,6 +1,6 @@
-Manifest-Version: 1.0
-AutoUpdate-Show-In-Client: false
-OpenIDE-Module: org.sleuthkit.autopsy.testing/3
-OpenIDE-Module-Implementation-Version: 7
-OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/testing/Bundle.properties
-
+Manifest-Version: 1.0
+AutoUpdate-Show-In-Client: false
+OpenIDE-Module: org.sleuthkit.autopsy.testing/3
+OpenIDE-Module-Implementation-Version: 7
+OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/testing/Bundle.properties
+
diff --git a/Testing/src/org/sleuthkit/autopsy/testing/Bundle.properties b/Testing/src/org/sleuthkit/autopsy/testing/Bundle.properties
index 023a96d3804ef8fdbae2e0915c34ed289ef36f48..125ec1c48586c91f2e0eae8edbaaa61607676fcc 100644
--- a/Testing/src/org/sleuthkit/autopsy/testing/Bundle.properties
+++ 
b/Testing/src/org/sleuthkit/autopsy/testing/Bundle.properties @@ -1 +1 @@ -OpenIDE-Module-Name=Testing +OpenIDE-Module-Name=Testing diff --git a/Timeline/manifest.mf b/Timeline/manifest.mf index 32102423364965d9787072b5b973fa320178ee47..6cc867f901f7b0879530b7d25912e4a83c549b9f 100644 --- a/Timeline/manifest.mf +++ b/Timeline/manifest.mf @@ -1,7 +1,7 @@ -Manifest-Version: 1.0 -OpenIDE-Module: org.sleuthkit.autopsy.timeline/1 -OpenIDE-Module-Layer: org/sleuthkit/autopsy/timeline/layer.xml -OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/timeline/Bundle.properties -OpenIDE-Module-Requires: org.openide.windows.WindowManager -OpenIDE-Module-Implementation-Version: 3 - +Manifest-Version: 1.0 +OpenIDE-Module: org.sleuthkit.autopsy.timeline/1 +OpenIDE-Module-Layer: org/sleuthkit/autopsy/timeline/layer.xml +OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/timeline/Bundle.properties +OpenIDE-Module-Requires: org.openide.windows.WindowManager +OpenIDE-Module-Implementation-Version: 3 + diff --git a/docs/QuickStartGuide/index.html b/docs/QuickStartGuide/index.html index 7fe6d0867b8d6529a03431a14e7cfefec0fc7452..7bafa3b45209aad7d8ad59d134a455e17500cf28 100644 --- a/docs/QuickStartGuide/index.html +++ b/docs/QuickStartGuide/index.html @@ -1,221 +1,221 @@ -<html> - - <head> - <link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css"> - <style> - h1 { font-size: 145%; color: #666666; } - h2 { font-size: 120%; color: #666666; } - </style> - <title>Autopsy 3 Quick Start Guide</title> - </head> - <body> - - <p align="center" style="font-size: 145%;"><strong>Autopsy 3 Quick Start Guide</strong></p> - <p align="center" style="font-size: 120%;">June 2013</p> - <p align="center"><a href="http://www.sleuthkit.org/autopsy/">www.sleuthkit.org/autopsy/</a></p> - - - <h1>Installation</h1> - <p> - The current version of Autopsy 3 runs only on Microsoft Windows. - We have gotten it to run on other platforms, such as Linux and OS X, but we do not have it in a state that makes it easy to distribute and find the needed libraries. - </p> - <p> - The Windows installer will make a directory for Autopsy and place all of the needed files inside of it. - The installer includes all dependencies, including Sleuth Kit and Java. - </p> - <p>Note that Autopsy 3 is a complete rewrite from Autopsy 2 and none of this document is relevant to Autopsy 2.</p> - - <h1>Adding a Data Source (image, local disk, logical files)</h1> - <p> - Data sources are added to a <strong>case</strong>. A case can have a single data source or it can have multiple data source if they are related. - Currently, a single report is generated for an entire case, so if you need to report on individual data sources, then you should use one data source per case. - </p> - - <h2>Creating a Case</h2> - <p> - To create a case, use either the "Create New Case" option on the Welcome screen or from the "File" menu. - This will start the <strong>New Case Wizard</strong>. You will need to supply it with the name of the case and a directory to store the case results into. - You can optionally provide case numbers and other details. - </p> - - - <h2>Adding a Data Source</h2> - <p> - The next step is to add input data source to the case. - The <strong>Add Data Source Wizard</strong> will start automatically after the case is created or you can manually start it from the "File" menu or toolbar. - You will need to choose the type of input data source to add (image, local disk or logical files and folders). 
- Next, supply it with the location of the source to add. - </p> - <ul> - <li>For a disk image, browse to the first file in the set (Autopsy will find the rest of the files). Autopsy currently supports E01 and raw (dd) files. - </li> - <li> - For local disk, select one of the detected disks. - Autopsy will add the current view of the disk to the case (i.e. snapshot of the meta-data). - However, the individual file content (not meta-data) does get updated with the changes made to the disk. - Note, you may need run Autopsy as an Administrator to detect all disks. - </li> - <li>For logical files (a single file or folder of files), use the "Add" button to add one or more files or folders on your system to the case. Folders will be recursively added to the case.</li> - </ul> - - - <p> - There are a couple of options in the wizard that will allow you to make the ingest process faster. - These typically deal with deleted files. - It will take longer if unallocated space is analyzed and the entire drive is searched for deleted files. - In some scenarios, these recovery steps must be performed and in other scenarios these steps are not needed and instead fast results on the allocated files are needed. - Use these options to control how long the analysis will take. - </p> - - <p> - Autopsy will start to analyze these data sources and add them to the case and internal database. While it is doing that, it will prompt you to configure the Ingest Modules. </p> - - - <h2>Ingest Modules</h2> - <p> - You will next be prompted to configure the Ingest Modules. - Ingest modules will run in the background and perform specific tasks. - The Ingest Modules analyze files in a prioritized order so that files in a user's directory are analyzed before files in other folders. - Ingest modules can be developed by third-parties and here are some of the standard ingest modules that come with Autopsy: - </p> - <ul> - <li><strong>Recent Activity</strong> - extracts user activity as saved by web browsers and the OS. Also runs regripper on the registry hive. - </li> - <li><strong>Hash Lookup</strong> - uses hash databases to ignore known files from the NIST NSRL and flag known bad files. - Use the "Advanced" button to add and configure the hash databases to use during this process. - You will get updates on known bad file hits as the ingest occurs. You can later add hash databases - via the Tools -> Options menu in the main UI. You can download an index of the NIST NSRL from - <a href="http://sourceforge.net/projects/autopsy/files/NSRL/">here</a>. - </li> - <li><strong>Keyword Search</strong> - uses keyword lists to identify files with specific words in them. - You can select the keyword lists to search for automatically and you can create new lists using the "Advanced" button. - Note that with keyword search, you can always conduct searches after ingest has finished. - The keyword lists that you select during ingest will be searched for at periodic intervals and you will get the results in real-time. - You do not need to wait for all files to be indexed. 
- </li> - <li><strong>Archive Extractor</strong> opens ZIP, RAR, and other archive formats and sends the files from those archive files back - through the pipelines for analysis.</li> - <li><strong>Exif Image Parser</strong> extracts EXIF information from JPEG files and posts the results into the tree in the main UI.</li> - <li><strong>Thunderbird Parser</strong> Identifies Thunderbird MBOX files and extracts the e-mails from them.</li> - </ul> - <p> - When you select a module, you will have the option to change its settings. - For example, you can configure which keyword search lists to use during ingest and which hash databases to use. - Refer to the help system inside of Autopsy for details on configuring each module. - </p> - <p> - While ingest modules are running in the background, you will see a progress bar in the lower right. - You can use the GUI to review incoming results and perform other tasks while ingest at that time. - </p> - - - <h1>Analysis Basics</h1> - <img src="screenshot.png" alt="Autopsy Screenshot" /> - <p>You will start all of your analysis techniques from the tree on the left.</p> - <ul> - <li>The Data Sources root node shows all data in the case.</li> - <ul> - <li>The individual image nodes show the file system structure of the disk images or local disks in the case.</li> - <li>The LogicalFileSet nodes show the logical files in the case.</li> - </ul> - <li>The Views node shows the same data from a file type or timeline perspective.</li> - <li>The Results node shows the output from the ingest modules.</li> - </ul> - - <p> - When you select a node from the tree on the left, a list of files will be shown in the upper right. - You can use the Thumbnail view in the upper right to view the pictures. - When you select a file from the upper right, its contents will be shown in the lower right. - You can use the tabs in the lower right to view the text of the file, an image, or the hex data. - </p> - - <p> - If you are viewing files from the Views and Results nodes, you can right-click on a file to go to its file system location. - This feature is useful to see what else the user stored in the same folder as the file that you are currently looking at. - You can also right click on a file to extract it to the local system. - </p> - <p> - If you want to search for single keywords, then you can use the search box in the upper right of the program. - The results will be shown in a table in the upper right. - </p> - - <p> You can tag (or bookmark) arbitrary files so that you can more quickly find them later or so that you can include them specifically in a report.</p> - - <h2>Ingest Inbox</h2> - <p> - As you are going through the results in the tree, the ingest modules are running in the background. - The results are shown in the tree as soon as the ingest modules find them and report them. - </p> - <p> - The Ingest Inbox receives messages from the ingest modules as they find results. - You can open the inbox to see what has been recently found. - It keeps track of what messages you have read. - </p> - <p> - The intended use of this inbox is that you can focus on some data for a while and then check back on the inbox at a time that is convenient for them. - You can then see what else was found while you were focused on the previous task. - You may learn that a known bad file was found or that a file was found with a relevant keyword and then decide to focus on that for a while. 
- </p> - <p> When you select a message, you can then jump to the Results tree where more details can be found or jump to the file's location in the filesystem.</p> - - <h2>Timeline (Beta)</h2> - <p>There is a basic timeline view that you can access via the Tools -> Make Timeline feature. This will take a few minutes to create the timeline for analysis. Its features are still in development.</p> - - - <h1>Example Use Cases</h1> - <p>In this section, we will provide examples of how to do common analysis tasks.</p> - - <h2>Web Artifacts</h2> - <p> - If you want to view the user's recent web activity, make sure that the Recent Activity ingest module was enabled. - You can then go to the "Results " node in the tree on the left and then into the "Extracted Data" node. - There, you can find bookmarks, cookies, downloads, and history. - </p> - - <h2>Known Bad Hash Files</h2> - <p> - If you want to see if the data source had known bad files, make sure that the Hash Lookup ingest module was enabled. - You can then view the "Hashset Hits" section in the "Results" area of the tree on the left. - Note that hash lookup can take a long time, so this section will be updated as long as the ingest process is occurring. - Use the Ingest Inbox to keep track of what known bad files were recently found. - </p> - <p> - When you find a known bad file in this interface, you may want to right click on the file to also view the file's original location. - You may find additional files that are relevant and stored in the same folder as this file. - </p> - - <h2>Media: Images and Videos</h2> - <p> - If you want to see all images and video on the disk image, then go to the "Views" section in the tree on the left and then "File Types". - Select either "Images" or "Videos". - You can use the thumbnail option in the upper right to view thumbnails of all images. - </p> - <ul class="note"> - <li><strong>Note</strong>: - We are working on making this more efficient when there are lots of images and we are working on the feature to display video thumbnails. - </li> - </ul> - <p>You can select an image or video from the upper right and view the video or image in the lower right. Video will be played with sound.</p> - - - <h1>Reporting</h1> - <p> - A final report can be generated that will include all analysis results. - Use the "Generate Report" button to create this. - It will create an HTML or XLS report in the Reports folder of the case folder. - If you forgot the location of your case folder, you can determine it using the "Case Properties" option in the "File" menu. - There is also an option to export report files to a separate folder outside of the case folder. - </p> - - <hr> - <p><i>Copyright © 2012-2013 Basis Technology.</i></p> - <p><i> - This work is licensed under a - <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/">Creative Commons Attribution-Share Alike 3.0 United States License</a>. 
- </i></p> - </body> -</html> +<html> + + <head> + <link rel="stylesheet" href="nbdocs:/org/sleuthkit/autopsy/core/docs/ide.css" type="text/css"> + <style> + h1 { font-size: 145%; color: #666666; } + h2 { font-size: 120%; color: #666666; } + </style> + <title>Autopsy 3 Quick Start Guide</title> + </head> + <body> + + <p align="center" style="font-size: 145%;"><strong>Autopsy 3 Quick Start Guide</strong></p> + <p align="center" style="font-size: 120%;">June 2013</p> + <p align="center"><a href="http://www.sleuthkit.org/autopsy/">www.sleuthkit.org/autopsy/</a></p> + + + <h1>Installation</h1> + <p> + The current version of Autopsy 3 runs only on Microsoft Windows. + We have gotten it to run on other platforms, such as Linux and OS X, but we do not have it in a state that makes it easy to distribute and find the needed libraries. + </p> + <p> + The Windows installer will make a directory for Autopsy and place all of the needed files inside of it. + The installer includes all dependencies, including Sleuth Kit and Java. + </p> + <p>Note that Autopsy 3 is a complete rewrite from Autopsy 2 and none of this document is relevant to Autopsy 2.</p> + + <h1>Adding a Data Source (image, local disk, logical files)</h1> + <p> + Data sources are added to a <strong>case</strong>. A case can have a single data source or it can have multiple data sources if they are related. + Currently, a single report is generated for an entire case, so if you need to report on individual data sources, then you should use one data source per case. + </p> + + <h2>Creating a Case</h2> + <p> + To create a case, use the "Create New Case" option on the Welcome screen or in the "File" menu. + This will start the <strong>New Case Wizard</strong>. You will need to supply it with the name of the case and a directory to store the case results into. + You can optionally provide case numbers and other details. + </p> + + + <h2>Adding a Data Source</h2> + <p> + The next step is to add an input data source to the case. + The <strong>Add Data Source Wizard</strong> will start automatically after the case is created, or you can manually start it from the "File" menu or toolbar. + You will need to choose the type of input data source to add (image, local disk, or logical files and folders). + Next, supply it with the location of the source to add. + </p> + <ul> + <li>For a disk image, browse to the first file in the set (Autopsy will find the rest of the files). Autopsy currently supports E01 and raw (dd) files. + </li> + <li> + For a local disk, select one of the detected disks. + Autopsy will add the current view of the disk to the case (i.e., a snapshot of the meta-data). + However, the individual file content (not the meta-data) does get updated with the changes made to the disk. + Note that you may need to run Autopsy as an Administrator to detect all disks. + </li> + <li>For logical files (a single file or folder of files), use the "Add" button to add one or more files or folders on your system to the case. Folders will be recursively added to the case.</li> + </ul> + + + <p> + There are a couple of options in the wizard that will allow you to make the ingest process faster. + These typically deal with deleted files. + It will take longer if unallocated space is analyzed and the entire drive is searched for deleted files. + In some scenarios these recovery steps must be performed; in other scenarios they are not needed and fast results on the allocated files are preferred.
+ Use these options to control how long the analysis will take. + </p> + + <p> + Autopsy will start to analyze these data sources and add them to the case and internal database. While it is doing that, it will prompt you to configure the Ingest Modules. </p> + + + <h2>Ingest Modules</h2> + <p> + You will next be prompted to configure the Ingest Modules. + Ingest modules will run in the background and perform specific tasks. + The Ingest Modules analyze files in a prioritized order so that files in a user's directory are analyzed before files in other folders. + Ingest modules can be developed by third parties. Here are some of the standard ingest modules that come with Autopsy: + </p> + <ul> + <li><strong>Recent Activity</strong> + extracts user activity as saved by web browsers and the OS. Also runs RegRipper on the registry hive. + </li> + <li><strong>Hash Lookup</strong> + uses hash databases to ignore known files from the NIST NSRL and flag known bad files. + Use the "Advanced" button to add and configure the hash databases to use during this process. + You will get updates on known bad file hits as the ingest occurs. You can later add hash databases + via the Tools -> Options menu in the main UI. You can download an index of the NIST NSRL from + <a href="http://sourceforge.net/projects/autopsy/files/NSRL/">here</a>. + </li> + <li><strong>Keyword Search</strong> + uses keyword lists to identify files with specific words in them. + You can select the keyword lists to search for automatically and you can create new lists using the "Advanced" button. + Note that with keyword search, you can always conduct searches after ingest has finished. + The keyword lists that you select during ingest will be searched for at periodic intervals and you will get the results in real-time. + You do not need to wait for all files to be indexed. + </li> + <li><strong>Archive Extractor</strong> opens ZIP, RAR, and other archive formats and sends the files from those archive files back + through the pipelines for analysis.</li> + <li><strong>Exif Image Parser</strong> extracts EXIF information from JPEG files and posts the results into the tree in the main UI.</li> + <li><strong>Thunderbird Parser</strong> identifies Thunderbird MBOX files and extracts the e-mails from them.</li> + </ul> + <p> + When you select a module, you will have the option to change its settings. + For example, you can configure which keyword search lists to use during ingest and which hash databases to use. + Refer to the help system inside of Autopsy for details on configuring each module. + </p> + <p> + While ingest modules are running in the background, you will see a progress bar in the lower right. + You can use the GUI to review incoming results and perform other tasks while ingest is running. + </p> + + + <h1>Analysis Basics</h1> + <img src="screenshot.png" alt="Autopsy Screenshot" /> + <p>You will start all of your analysis techniques from the tree on the left.</p> + <ul> + <li>The Data Sources root node shows all data in the case.</li> + <ul> + <li>The individual image nodes show the file system structure of the disk images or local disks in the case.</li> + <li>The LogicalFileSet nodes show the logical files in the case.</li> + </ul> + <li>The Views node shows the same data from a file type or timeline perspective.</li> + <li>The Results node shows the output from the ingest modules.</li> + </ul> + + <p> + When you select a node from the tree on the left, a list of files will be shown in the upper right.
+ You can use the Thumbnail view in the upper right to view the pictures. + When you select a file from the upper right, its contents will be shown in the lower right. + You can use the tabs in the lower right to view the text of the file, an image, or the hex data. + </p> + + <p> + If you are viewing files from the Views and Results nodes, you can right-click on a file to go to its file system location. + This feature is useful for seeing what else the user stored in the same folder as the file that you are currently looking at. + You can also right-click on a file to extract it to the local system. + </p> + <p> + If you want to search for single keywords, then you can use the search box in the upper right of the program. + The results will be shown in a table in the upper right. + </p> + + <p> You can tag (or bookmark) arbitrary files so that you can more quickly find them later or so that you can include them specifically in a report.</p> + + <h2>Ingest Inbox</h2> + <p> + As you are going through the results in the tree, the ingest modules are running in the background. + The results are shown in the tree as soon as the ingest modules find them and report them. + </p> + <p> + The Ingest Inbox receives messages from the ingest modules as they find results. + You can open the inbox to see what has been recently found. + It keeps track of what messages you have read. + </p> + <p> + The intended use of this inbox is that you can focus on some data for a while and then check back on the inbox at a time that is convenient for you. + You can then see what else was found while you were focused on the previous task. + You may learn that a known bad file was found or that a file was found with a relevant keyword and then decide to focus on that for a while. + </p> + <p> When you select a message, you can then jump to the Results tree where more details can be found or jump to the file's location in the filesystem.</p> + + <h2>Timeline (Beta)</h2> + <p>There is a basic timeline view that you can access via the Tools -> Make Timeline feature. This will take a few minutes to create the timeline for analysis. Its features are still in development.</p> + + + <h1>Example Use Cases</h1> + <p>In this section, we will provide examples of how to do common analysis tasks.</p> + + <h2>Web Artifacts</h2> + <p> + If you want to view the user's recent web activity, make sure that the Recent Activity ingest module was enabled. + You can then go to the "Results" node in the tree on the left and then into the "Extracted Data" node. + There, you can find bookmarks, cookies, downloads, and history. + </p> + + <h2>Known Bad Hash Files</h2> + <p> + If you want to see if the data source had known bad files, make sure that the Hash Lookup ingest module was enabled. + You can then view the "Hashset Hits" section in the "Results" area of the tree on the left. + Note that hash lookup can take a long time, so this section will be updated as long as the ingest process is occurring. + Use the Ingest Inbox to keep track of what known bad files were recently found. + </p> + <p> + When you find a known bad file in this interface, you may want to right-click on the file to also view the file's original location. + You may find additional files that are relevant and stored in the same folder as this file. + </p> + + <h2>Media: Images and Videos</h2> + <p> + If you want to see all images and videos on the disk image, then go to the "Views" section in the tree on the left and then "File Types". + Select either "Images" or "Videos".
+ You can use the thumbnail option in the upper right to view thumbnails of all images. + </p> + <ul class="note"> + <li><strong>Note</strong>: + We are working on making this more efficient for large numbers of images, and on the feature to display video thumbnails. + </li> + </ul> + <p>You can select an image or video from the upper right and view the video or image in the lower right. Video will be played with sound.</p> + + + <h1>Reporting</h1> + <p> + A final report can be generated that will include all analysis results. + Use the "Generate Report" button to create this. + It will create an HTML or XLS report in the Reports folder of the case folder. + If you forget the location of your case folder, you can determine it using the "Case Properties" option in the "File" menu. + There is also an option to export report files to a separate folder outside of the case folder. + </p> + + <hr> + <p><i>Copyright © 2012-2013 Basis Technology.</i></p> + <p><i> + This work is licensed under a + <a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/us/">Creative Commons Attribution-Share Alike 3.0 United States License</a>. + </i></p> + </body> +</html> diff --git a/docs/doxygen/needs_a_home.dox b/docs/doxygen/needs_a_home.dox index c6badf6b36db1983669e0ade827bca6e6bf89b95..b0a2b42d4f3777aa3c1150817288352aaef3e7fc 100755 --- a/docs/doxygen/needs_a_home.dox +++ b/docs/doxygen/needs_a_home.dox @@ -1,30 +1,30 @@ - -<!-- @@@ MOVE THIS SOMEWHERE ELSE -- the directory tree package maybe?? --> - -The component is by default registered with the ingest manager as an ingest event listener. -The viewer first loads all the viewer-supported data currently in the blackboard when Autopsy starts. -During the ingest process the viewer receives events from ingest modules -(relayed by ingest manager) and it selectively refreshes parts of the tree providing real-time updates to the user. -When ingest is completed, the viewer responds to the final ingest data event generated by the ingest manager, -and performs a final refresh of all viewer-supported data in the blackboard. - - -Node content support capabilities are registered in the node's Lookup. - - -<!-- @@@ This is too detailed for here, but maybe should be broken up and put into the sections on making a result viewer and such… ---> - -\section design_data_flow Data Flow - -\subsection design_data_flow_create Creating Nodes in DataExplorer - -Data flows between the UI zones using a NetBeans node. The DataExplorer modules create the NetBeans nodes. They query the SQLite database or do whatever they want to identify the set of files that are of interest. They create the NetBeans nodes based on Sleuthkit data model objects. See the org.sleuthkit.autopsy.datamodel package for more details on this. - -\subsection design_data_flow_toResult Getting Nodes to DataResult - -Each DataExplorer TopComponent is responsible for creating its own DataResult TopComponent to display its results. It can choose to re-use the same TopComponent for multiple searches (as DirectoryTree does) or it can choose to make a new one each time (as FileSearch does). The setNode() method on the DataResult object is used to set the root node to display. A dummy root node must be created as the parent if a parent does not already exist. - -The DataExplorer is responsible for setting the double-click and right-click actions associated with the node. The default single click action is to pass data to DataContent.
To override this, you must create a new DataResultViewer instance that overrides the propertyChange() method. The DataExplorer adds actions to wrapping the node in a FilterNode variant. The FilterNode then defines the actions for the node by overriding the getPreferredAction() and getActions() methods. As an example, org.sleuthkit.autopsy.directorytree.DataResultFilterNode and org.sleuthkit.autopsy.directorytree.DataResultFilterChildren wraps the nodes that are passed over by the DirectoryTree DataExplorer. - -DataResult can send data back to its DataExplorer by making a custom action that looks up it's instance (DataExplorer.getInstance()). + +<!-- @@@ MOVE THIS SOMEWHERE ELSE -- the directory tree package maybe?? --> + +The component is by default registered with the ingest manager as an ingest event listener. +The viewer first loads all the viewer-supported data currently in the blackboard when Autopsy starts. +During the ingest process the viewer receives events from ingest modules +(relayed by the ingest manager) and it selectively refreshes parts of the tree, providing real-time updates to the user. +When ingest is completed, the viewer responds to the final ingest data event generated by the ingest manager, +and performs a final refresh of all viewer-supported data in the blackboard. + + +Node content support capabilities are registered in the node's Lookup. + + +<!-- @@@ This is too detailed for here, but maybe should be broken up and put into the sections on making a result viewer and such… +--> + +\section design_data_flow Data Flow + +\subsection design_data_flow_create Creating Nodes in DataExplorer + +Data flows between the UI zones using a NetBeans node. The DataExplorer modules create the NetBeans nodes. They query the SQLite database or do whatever they want to identify the set of files that are of interest. They create the NetBeans nodes based on Sleuthkit data model objects. See the org.sleuthkit.autopsy.datamodel package for more details on this. + +\subsection design_data_flow_toResult Getting Nodes to DataResult + +Each DataExplorer TopComponent is responsible for creating its own DataResult TopComponent to display its results. It can choose to re-use the same TopComponent for multiple searches (as DirectoryTree does) or it can choose to make a new one each time (as FileSearch does). The setNode() method on the DataResult object is used to set the root node to display. A dummy root node must be created as the parent if a parent does not already exist. + +The DataExplorer is responsible for setting the double-click and right-click actions associated with the node. The default single-click action is to pass data to DataContent. To override this, you must create a new DataResultViewer instance that overrides the propertyChange() method. The DataExplorer adds actions by wrapping the node in a FilterNode variant. The FilterNode then defines the actions for the node by overriding the getPreferredAction() and getActions() methods. As an example, org.sleuthkit.autopsy.directorytree.DataResultFilterNode and org.sleuthkit.autopsy.directorytree.DataResultFilterChildren wrap the nodes that are passed over by the DirectoryTree DataExplorer. + +DataResult can send data back to its DataExplorer by making a custom action that looks up its instance (DataExplorer.getInstance()).
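+
+To make the FilterNode idiom above concrete, here is a minimal sketch. It is illustrative only (the class and its action are placeholders, not Autopsy's actual DataResultFilterNode) and assumes nothing beyond the standard NetBeans org.openide.nodes API:
+
+\code
+import java.awt.event.ActionEvent;
+import javax.swing.AbstractAction;
+import javax.swing.Action;
+import org.openide.nodes.FilterNode;
+import org.openide.nodes.Node;
+
+// Wraps an existing node and redefines its right-click and double-click actions.
+public class ExampleFilterNode extends FilterNode {
+
+    private final Action viewAction = new AbstractAction("View in New Window") {
+        @Override
+        public void actionPerformed(ActionEvent e) {
+            // A real implementation would open a viewer on getOriginal().
+        }
+    };
+
+    public ExampleFilterNode(Node original) {
+        super(original); // display name, icon, and children are delegated
+    }
+
+    @Override
+    public Action[] getActions(boolean context) {
+        // Replaces the wrapped node's context-menu actions entirely.
+        return new Action[]{viewAction};
+    }
+
+    @Override
+    public Action getPreferredAction() {
+        // The action invoked on double-click.
+        return viewAction;
+    }
+}
+\endcode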
diff --git a/docs/doxygen/workflow.dox b/docs/doxygen/workflow.dox index e7e3b9c8826a6cc78f77f3320b4f3b1ace6a3ade..c9bdf78486c803b38b339d4a95a80b8987e518f6 100644 --- a/docs/doxygen/workflow.dox +++ b/docs/doxygen/workflow.dox @@ -1,53 +1,53 @@ -/*! \page workflow_page General Workflow and Design - -\section design_overview Overview -This section outlines the internal Autopsy design from the typical analysis work flow perspective. -This page is organized based on these phases: -- A Case is created. -- Images are added to the case and ingest modules are run. -- Results are manually reviewed and searched. -- Reports are generated. - -\section design_case Creating a Case -The first step in Autopsy work flow is creating a case. This is done in the org.sleuthkit.autopsy.casemodule package (see \ref casemodule_overview for details). This module contains the wizards needed and deals with how to store the information. You should not need to do much modifications in this package. But, you will want to use the org.sleuthkit.autopsy.casemodule.Case object to access all data related to this case. - - -\section design_image Adding an Image and Running Ingest Modules - -After case is created, one or more disk images can be added to the case. There is a wizard to guide that process and it is located in the org.sleuthkit.autopsy.casemodule package. Refer to the package section \ref casemodule_add_image for more details on the wizard. Most developers will not need to touch this code though. An important concept though is that adding an image to a case means that Autopsy uses The Sleuth Kit to enumerate all of the files in the file system and make a database entry for them in the embedded SQLite database that was created for the case. The database will be used for all further analysis. - -After image has been added to the case, the user can select one or more ingest modules to be executed on the image. Ingest modules focus on a specific type of analysis task and run in the background. They either analyze the entire disk image or individual files. The user will see the results from the modules in the result tree and in the ingest inbox. - -The org.sleuthkit.autopsy.ingest package provides the basic infrastructure for the ingest module management. - -If you want to develop a module that analyzes drive data, then this is probably the type of module that you want to build. See \ref mod_ingest_page for more details on making an ingest module. - - -\section design_view Viewing Results - -The UI has three main areas. The tree on the left-hand side, the result viewers in the upper right, and the content viewers in the lower right. Data passes between these areas by encapsulating them in Netbeans Node objects (see org.openide.nodes.Node). These allow Autopsy to generically handle all types of data. The org.sleuthkit.autopsy.datamodel package wraps the generic org.sleuthkit.datamodel Sleuth Kit objects as Netbeans Nodes. - -Nodes are modeled in a parent-child hierarchy with other nodes. All data within a Case is represented in a hierarchy with the disk images being one level below the case and volumes and such below the image. - -The tree on the left hand-side shows the analysis results. -Its contents are populated from the central database. -This is where you can browse the file system contents and see the results from the blackboard. -<!-- @@@(see \ref blackboard_page). --> -The tree is implemented in the org.sleuthkit.autopsy.directorytree package. - -The area in the upper right is the result viewer area. 
When a node is selected from the tree, the node and its children are sent to this area. This area is used to view a set of nodes. The viewer is itself a framework with modules that display the data in different layouts. For example, the standard version comes with a table viewer and a thumbnail viewer. Refer to \ref mod_result_page for details on building a data result module. - -When an item is selected from the result viewer area, it is passed to the bottom right content viewers. It too is a framework with many modules that know how to show information about a specific file in different ways. For example, there are viewers that show the data in a hex dump format, extract the strings, and display pictures and movies. -See \ref mod_content_page for details on building new content viewers. - -\section design_report Report generation - -When ingest is complete, the user can generate reports. -There is a reporting framework to enable many different formats. Autopsy currently comes with generic html, xml and Excel reports. See the org.sleuthkit.autopsy.report package for details on the framework and -\ref mod_report_page for details on building a new report module. - - - - - -*/ +/*! \page workflow_page General Workflow and Design + +\section design_overview Overview +This section outlines the internal Autopsy design from the perspective of a typical analysis workflow. +This page is organized based on these phases: +- A Case is created. +- Images are added to the case and ingest modules are run. +- Results are manually reviewed and searched. +- Reports are generated. + +\section design_case Creating a Case +The first step in the Autopsy workflow is creating a case. This is done in the org.sleuthkit.autopsy.casemodule package (see \ref casemodule_overview for details). This module contains the wizards needed and deals with how to store the information. You should not need to make many modifications in this package, but you will want to use the org.sleuthkit.autopsy.casemodule.Case object to access all data related to this case. + + +\section design_image Adding an Image and Running Ingest Modules + +After a case is created, one or more disk images can be added to the case. There is a wizard to guide that process and it is located in the org.sleuthkit.autopsy.casemodule package. Refer to the package section \ref casemodule_add_image for more details on the wizard. Most developers will not need to touch this code. An important concept, though, is that adding an image to a case means that Autopsy uses The Sleuth Kit to enumerate all of the files in the file system and make a database entry for them in the embedded SQLite database that was created for the case. The database will be used for all further analysis. + +After an image has been added to the case, the user can select one or more ingest modules to be executed on the image. Ingest modules focus on a specific type of analysis task and run in the background. They either analyze the entire disk image or individual files. The user will see the results from the modules in the result tree and in the ingest inbox. + +The org.sleuthkit.autopsy.ingest package provides the basic infrastructure for ingest module management. + +If you want to develop a module that analyzes drive data, then this is probably the type of module that you want to build. See \ref mod_ingest_page for more details on making an ingest module. + + +\section design_view Viewing Results + +The UI has three main areas:
the tree on the left-hand side, the result viewers in the upper right, and the content viewers in the lower right. Data passes between these areas encapsulated in NetBeans Node objects (see org.openide.nodes.Node). These allow Autopsy to generically handle all types of data. The org.sleuthkit.autopsy.datamodel package wraps the generic org.sleuthkit.datamodel Sleuth Kit objects as NetBeans Nodes; a short sketch of this wrapping appears at the end of this page. + +Nodes are modeled in a parent-child hierarchy with other nodes. All data within a Case is represented in a hierarchy with the disk images being one level below the case and volumes and such below the image. + +The tree on the left-hand side shows the analysis results. +Its contents are populated from the central database. +This is where you can browse the file system contents and see the results from the blackboard. +<!-- @@@(see \ref blackboard_page). --> +The tree is implemented in the org.sleuthkit.autopsy.directorytree package. + +The area in the upper right is the result viewer area. When a node is selected from the tree, the node and its children are sent to this area. This area is used to view a set of nodes. The viewer is itself a framework with modules that display the data in different layouts. For example, the standard version comes with a table viewer and a thumbnail viewer. Refer to \ref mod_result_page for details on building a data result module. + +When an item is selected from the result viewer area, it is passed to the bottom right content viewers. It too is a framework with many modules that know how to show information about a specific file in different ways. For example, there are viewers that show the data in a hex dump format, extract the strings, and display pictures and movies. +See \ref mod_content_page for details on building new content viewers. + +\section design_report Report generation + +When ingest is complete, the user can generate reports. +There is a reporting framework to enable many different formats. Autopsy currently comes with generic HTML, XML, and Excel reports. See the org.sleuthkit.autopsy.report package for details on the framework and +\ref mod_report_page for details on building a new report module.
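+
+The Node encapsulation described above can be sketched briefly. This is a hypothetical example (java.io.File stands in for a Sleuth Kit data-model object), not code from the org.sleuthkit.autopsy.datamodel package:
+
+\code
+import java.io.File;
+import org.openide.nodes.AbstractNode;
+import org.openide.nodes.Children;
+import org.openide.util.lookup.Lookups;
+
+// Wraps a domain object as a NetBeans Node and registers it in the node's
+// Lookup so that result and content viewers can retrieve it generically.
+public class ExampleFileNode extends AbstractNode {
+
+    public ExampleFileNode(File file) {
+        // Children.LEAF marks a node with no children; Lookups.singleton
+        // exposes the wrapped object through the node's Lookup.
+        super(Children.LEAF, Lookups.singleton(file));
+        setName(file.getName());
+        setDisplayName(file.getName());
+    }
+}
+
+// A viewer can recover the wrapped object without knowing the node type:
+//   File f = someNode.getLookup().lookup(File.class);
+\endcode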
+ + + + + +*/ diff --git a/nbproject/platform.properties b/nbproject/platform.properties index e0bdd68b7351ea5d0b49ac9de4bdb7edb8ce75df..a9fa87f7496bddf978517768f6a4ae1c84e24456 100644 --- a/nbproject/platform.properties +++ b/nbproject/platform.properties @@ -1,120 +1,120 @@ -branding.token=autopsy -netbeans-plat-version=7.3.1 -suite.dir=${basedir} -nbplatform.active.dir=${suite.dir}/netbeans-plat/${netbeans-plat-version} -harness.dir=${nbplatform.active.dir}/harness -bootstrap.url=http://deadlock.netbeans.org/hudson/job/nbms-and-javadoc/lastStableBuild/artifact/nbbuild/netbeans/harness/tasks.jar -autoupdate.catalog.url=http://dlc.sun.com.edgesuite.net/netbeans/updates/${netbeans-plat-version}/uc/final/distribution/catalog.xml.gz -cluster.path=\ - ${nbplatform.active.dir}/harness:\ - ${nbplatform.active.dir}/java:\ - ${nbplatform.active.dir}/platform -disabled.modules=\ - org.apache.tools.ant.module,\ - org.netbeans.api.debugger.jpda,\ - org.netbeans.api.java,\ - org.netbeans.lib.nbjavac,\ - org.netbeans.libs.cglib,\ - org.netbeans.libs.javacapi,\ - org.netbeans.libs.javacimpl,\ - org.netbeans.libs.springframework,\ - org.netbeans.modules.ant.browsetask,\ - org.netbeans.modules.ant.debugger,\ - org.netbeans.modules.ant.freeform,\ - org.netbeans.modules.ant.grammar,\ - org.netbeans.modules.ant.kit,\ - org.netbeans.modules.beans,\ - org.netbeans.modules.classfile,\ - org.netbeans.modules.dbschema,\ - org.netbeans.modules.debugger.jpda,\ - org.netbeans.modules.debugger.jpda.ant,\ - org.netbeans.modules.debugger.jpda.kit,\ - org.netbeans.modules.debugger.jpda.projects,\ - org.netbeans.modules.debugger.jpda.ui,\ - org.netbeans.modules.debugger.jpda.visual,\ - org.netbeans.modules.findbugs.installer,\ - org.netbeans.modules.form,\ - org.netbeans.modules.form.binding,\ - org.netbeans.modules.form.j2ee,\ - org.netbeans.modules.form.kit,\ - org.netbeans.modules.form.nb,\ - org.netbeans.modules.form.refactoring,\ - org.netbeans.modules.hibernate,\ - org.netbeans.modules.hibernatelib,\ - org.netbeans.modules.hudson.ant,\ - org.netbeans.modules.hudson.maven,\ - org.netbeans.modules.i18n,\ - org.netbeans.modules.i18n.form,\ - org.netbeans.modules.j2ee.core.utilities,\ - org.netbeans.modules.j2ee.eclipselink,\ - org.netbeans.modules.j2ee.eclipselinkmodelgen,\ - org.netbeans.modules.j2ee.jpa.refactoring,\ - org.netbeans.modules.j2ee.jpa.verification,\ - org.netbeans.modules.j2ee.metadata,\ - org.netbeans.modules.j2ee.metadata.model.support,\ - org.netbeans.modules.j2ee.persistence,\ - org.netbeans.modules.j2ee.persistence.kit,\ - org.netbeans.modules.j2ee.persistenceapi,\ - org.netbeans.modules.java.api.common,\ - org.netbeans.modules.java.debug,\ - org.netbeans.modules.java.editor,\ - org.netbeans.modules.java.editor.lib,\ - org.netbeans.modules.java.examples,\ - org.netbeans.modules.java.freeform,\ - org.netbeans.modules.java.guards,\ - org.netbeans.modules.java.helpset,\ - org.netbeans.modules.java.hints,\ - org.netbeans.modules.java.hints.declarative,\ - org.netbeans.modules.java.hints.declarative.test,\ - org.netbeans.modules.java.hints.legacy.spi,\ - org.netbeans.modules.java.hints.test,\ - org.netbeans.modules.java.hints.ui,\ - org.netbeans.modules.java.j2seplatform,\ - org.netbeans.modules.java.j2seproject,\ - org.netbeans.modules.java.kit,\ - org.netbeans.modules.java.lexer,\ - org.netbeans.modules.java.navigation,\ - org.netbeans.modules.java.platform,\ - org.netbeans.modules.java.preprocessorbridge,\ - org.netbeans.modules.java.project,\ - org.netbeans.modules.java.source,\ - 
org.netbeans.modules.java.source.ant,\ - org.netbeans.modules.java.source.queries,\ - org.netbeans.modules.java.source.queriesimpl,\ - org.netbeans.modules.java.sourceui,\ - org.netbeans.modules.java.testrunner,\ - org.netbeans.modules.javadoc,\ - org.netbeans.modules.javawebstart,\ - org.netbeans.modules.junit,\ - org.netbeans.modules.maven,\ - org.netbeans.modules.maven.checkstyle,\ - org.netbeans.modules.maven.coverage,\ - org.netbeans.modules.maven.embedder,\ - org.netbeans.modules.maven.grammar,\ - org.netbeans.modules.maven.graph,\ - org.netbeans.modules.maven.hints,\ - org.netbeans.modules.maven.indexer,\ - org.netbeans.modules.maven.junit,\ - org.netbeans.modules.maven.kit,\ - org.netbeans.modules.maven.model,\ - org.netbeans.modules.maven.osgi,\ - org.netbeans.modules.maven.persistence,\ - org.netbeans.modules.maven.refactoring,\ - org.netbeans.modules.maven.repository,\ - org.netbeans.modules.maven.search,\ - org.netbeans.modules.maven.spring,\ - org.netbeans.modules.projectimport.eclipse.core,\ - org.netbeans.modules.projectimport.eclipse.j2se,\ - org.netbeans.modules.refactoring.java,\ - org.netbeans.modules.spellchecker.bindings.java,\ - org.netbeans.modules.spring.beans,\ - org.netbeans.modules.testng,\ - org.netbeans.modules.testng.ant,\ - org.netbeans.modules.testng.maven,\ - org.netbeans.modules.websvc.jaxws21,\ - org.netbeans.modules.websvc.jaxws21api,\ - org.netbeans.modules.websvc.saas.codegen.java,\ - org.netbeans.modules.xml.jaxb,\ - org.netbeans.modules.xml.tools.java,\ - org.netbeans.spi.java.hints - +branding.token=autopsy +netbeans-plat-version=7.3.1 +suite.dir=${basedir} +nbplatform.active.dir=${suite.dir}/netbeans-plat/${netbeans-plat-version} +harness.dir=${nbplatform.active.dir}/harness +bootstrap.url=http://deadlock.netbeans.org/hudson/job/nbms-and-javadoc/lastStableBuild/artifact/nbbuild/netbeans/harness/tasks.jar +autoupdate.catalog.url=http://dlc.sun.com.edgesuite.net/netbeans/updates/${netbeans-plat-version}/uc/final/distribution/catalog.xml.gz +cluster.path=\ + ${nbplatform.active.dir}/harness:\ + ${nbplatform.active.dir}/java:\ + ${nbplatform.active.dir}/platform +disabled.modules=\ + org.apache.tools.ant.module,\ + org.netbeans.api.debugger.jpda,\ + org.netbeans.api.java,\ + org.netbeans.lib.nbjavac,\ + org.netbeans.libs.cglib,\ + org.netbeans.libs.javacapi,\ + org.netbeans.libs.javacimpl,\ + org.netbeans.libs.springframework,\ + org.netbeans.modules.ant.browsetask,\ + org.netbeans.modules.ant.debugger,\ + org.netbeans.modules.ant.freeform,\ + org.netbeans.modules.ant.grammar,\ + org.netbeans.modules.ant.kit,\ + org.netbeans.modules.beans,\ + org.netbeans.modules.classfile,\ + org.netbeans.modules.dbschema,\ + org.netbeans.modules.debugger.jpda,\ + org.netbeans.modules.debugger.jpda.ant,\ + org.netbeans.modules.debugger.jpda.kit,\ + org.netbeans.modules.debugger.jpda.projects,\ + org.netbeans.modules.debugger.jpda.ui,\ + org.netbeans.modules.debugger.jpda.visual,\ + org.netbeans.modules.findbugs.installer,\ + org.netbeans.modules.form,\ + org.netbeans.modules.form.binding,\ + org.netbeans.modules.form.j2ee,\ + org.netbeans.modules.form.kit,\ + org.netbeans.modules.form.nb,\ + org.netbeans.modules.form.refactoring,\ + org.netbeans.modules.hibernate,\ + org.netbeans.modules.hibernatelib,\ + org.netbeans.modules.hudson.ant,\ + org.netbeans.modules.hudson.maven,\ + org.netbeans.modules.i18n,\ + org.netbeans.modules.i18n.form,\ + org.netbeans.modules.j2ee.core.utilities,\ + org.netbeans.modules.j2ee.eclipselink,\ + 
org.netbeans.modules.j2ee.eclipselinkmodelgen,\ + org.netbeans.modules.j2ee.jpa.refactoring,\ + org.netbeans.modules.j2ee.jpa.verification,\ + org.netbeans.modules.j2ee.metadata,\ + org.netbeans.modules.j2ee.metadata.model.support,\ + org.netbeans.modules.j2ee.persistence,\ + org.netbeans.modules.j2ee.persistence.kit,\ + org.netbeans.modules.j2ee.persistenceapi,\ + org.netbeans.modules.java.api.common,\ + org.netbeans.modules.java.debug,\ + org.netbeans.modules.java.editor,\ + org.netbeans.modules.java.editor.lib,\ + org.netbeans.modules.java.examples,\ + org.netbeans.modules.java.freeform,\ + org.netbeans.modules.java.guards,\ + org.netbeans.modules.java.helpset,\ + org.netbeans.modules.java.hints,\ + org.netbeans.modules.java.hints.declarative,\ + org.netbeans.modules.java.hints.declarative.test,\ + org.netbeans.modules.java.hints.legacy.spi,\ + org.netbeans.modules.java.hints.test,\ + org.netbeans.modules.java.hints.ui,\ + org.netbeans.modules.java.j2seplatform,\ + org.netbeans.modules.java.j2seproject,\ + org.netbeans.modules.java.kit,\ + org.netbeans.modules.java.lexer,\ + org.netbeans.modules.java.navigation,\ + org.netbeans.modules.java.platform,\ + org.netbeans.modules.java.preprocessorbridge,\ + org.netbeans.modules.java.project,\ + org.netbeans.modules.java.source,\ + org.netbeans.modules.java.source.ant,\ + org.netbeans.modules.java.source.queries,\ + org.netbeans.modules.java.source.queriesimpl,\ + org.netbeans.modules.java.sourceui,\ + org.netbeans.modules.java.testrunner,\ + org.netbeans.modules.javadoc,\ + org.netbeans.modules.javawebstart,\ + org.netbeans.modules.junit,\ + org.netbeans.modules.maven,\ + org.netbeans.modules.maven.checkstyle,\ + org.netbeans.modules.maven.coverage,\ + org.netbeans.modules.maven.embedder,\ + org.netbeans.modules.maven.grammar,\ + org.netbeans.modules.maven.graph,\ + org.netbeans.modules.maven.hints,\ + org.netbeans.modules.maven.indexer,\ + org.netbeans.modules.maven.junit,\ + org.netbeans.modules.maven.kit,\ + org.netbeans.modules.maven.model,\ + org.netbeans.modules.maven.osgi,\ + org.netbeans.modules.maven.persistence,\ + org.netbeans.modules.maven.refactoring,\ + org.netbeans.modules.maven.repository,\ + org.netbeans.modules.maven.search,\ + org.netbeans.modules.maven.spring,\ + org.netbeans.modules.projectimport.eclipse.core,\ + org.netbeans.modules.projectimport.eclipse.j2se,\ + org.netbeans.modules.refactoring.java,\ + org.netbeans.modules.spellchecker.bindings.java,\ + org.netbeans.modules.spring.beans,\ + org.netbeans.modules.testng,\ + org.netbeans.modules.testng.ant,\ + org.netbeans.modules.testng.maven,\ + org.netbeans.modules.websvc.jaxws21,\ + org.netbeans.modules.websvc.jaxws21api,\ + org.netbeans.modules.websvc.saas.codegen.java,\ + org.netbeans.modules.xml.jaxb,\ + org.netbeans.modules.xml.tools.java,\ + org.netbeans.spi.java.hints + diff --git a/test/README.txt b/test/README.txt index d0064b4f959890c4386d78763cc3e6ec5605e1b5..854f5e1a33107124f90dfe74330eb014e63a35a2 100644 --- a/test/README.txt +++ b/test/README.txt @@ -1,13 +1,13 @@ -This folder contains the data and scripts required to run regression tests -for Autopsy. There is a 'Testing' folder in the root directory that contains -the Java code that drives Autopsy to perform the tests. - -To run these tests: -- You will need python3. We run this from within Cygwin. -- Download the input images by typing 'ant test-download-imgs' in the root Autopsy folder. - This will place images in 'test/input'. -- Run 'python3 regression.py' from inside of the 'test/scripts' folder. 
-- Alternatively, run 'python3 regression.py -l [CONFIGFILE] to run the tests on a specified - list of images using a configuration file. See config.xml in the 'test/scripts' folder to - see configuration file formatting. -- Run 'python3 regression.py -h' to see other options. +This folder contains the data and scripts required to run regression tests +for Autopsy. There is a 'Testing' folder in the root directory that contains +the Java code that drives Autopsy to perform the tests. + +To run these tests: +- You will need python3. We run this from within Cygwin. +- Download the input images by typing 'ant test-download-imgs' in the root Autopsy folder. + This will place images in 'test/input'. +- Run 'python3 regression.py' from inside of the 'test/script' folder. +- Alternatively, run 'python3 regression.py -l [CONFIGFILE]' to run the tests on a specified + list of images using a configuration file. See config.xml in the 'test/script' folder to + see configuration file formatting. +- Run 'python3 regression.py -h' to see other options. diff --git a/test/script/Emailer.py b/test/script/Emailer.py index 5d12e6afa3213fec3ccdb4adfe3ed383edb33caf..7e661e12eaa63aa611fd03c65b28d450e076d43a 100644 --- a/test/script/Emailer.py +++ b/test/script/Emailer.py @@ -1,49 +1,49 @@ -import smtplib -from email.mime.image import MIMEImage -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -from email.mime.base import MIMEBase -from email import encoders -import xml -from xml.dom.minidom import parse, parseString - -def send_email(to, server, subj, body, attachments): - """Send an email with the given information. - - Args: - to: a String, the email address to send the email to - server: a String, the mail server to send from - subj: a String, the subject line of the message - body: a String, the body of the message - attachments: a listof_pathto_File, the attachements to include - """ - msg = MIMEMultipart() - msg['Subject'] = subj - # me == the sender's email address - # family = the list of all recipients' email addresses - msg['From'] = 'AutopsyTest' - msg['To'] = to - msg.preamble = 'This is a test' - container = MIMEText(body, 'plain') - msg.attach(container) - Build_email(msg, attachments) - s = smtplib.SMTP(server) - try: - print('Sending Email') - s.sendmail(msg['From'], msg['To'], msg.as_string()) - except Exception as e: - print(str(e)) - s.quit() - -def Build_email(msg, attachments): - for file in attachments: - part = MIMEBase('application', "octet-stream") - atach = open(file, "rb") - attch = atach.read() - noml = file.split("\\") - nom = noml[len(noml)-1] - part.set_payload(attch) - encoders.encode_base64(part) - part.add_header('Content-Disposition', 'attachment; filename="' + nom + '"') - msg.attach(part) - +import smtplib +from email.mime.image import MIMEImage +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from email.mime.base import MIMEBase +from email import encoders +import xml +from xml.dom.minidom import parse, parseString + +def send_email(to, server, subj, body, attachments): + """Send an email with the given information.
+ + Args: + to: a String, the email address to send the email to + server: a String, the mail server to send from + subj: a String, the subject line of the message + body: a String, the body of the message + attachments: a listof_pathto_File, the attachments to include + """ + msg = MIMEMultipart() + msg['Subject'] = subj + # 'From' is a fixed sender name + # 'To' is the recipient's email address + msg['From'] = 'AutopsyTest' + msg['To'] = to + msg.preamble = 'This is a test' + container = MIMEText(body, 'plain') + msg.attach(container) + Build_email(msg, attachments) + s = smtplib.SMTP(server) + try: + print('Sending Email') + s.sendmail(msg['From'], msg['To'], msg.as_string()) + except Exception as e: + print(str(e)) + s.quit() + +def Build_email(msg, attachments): + for file in attachments: + part = MIMEBase('application', "octet-stream") + atach = open(file, "rb") + attch = atach.read() + noml = file.split("\\") + nom = noml[len(noml)-1] + part.set_payload(attch) + encoders.encode_base64(part) + part.add_header('Content-Disposition', 'attachment; filename="' + nom + '"') + msg.attach(part) + diff --git a/test/script/regression.py b/test/script/regression.py index 498762dd2319e7574a79a9f3a624ac42abb03e6c..b07b3a48e22ea3b3209f68e7b074a51b1eb518aa 100644 --- a/test/script/regression.py +++ b/test/script/regression.py @@ -1,1857 +1,1857 @@ -#!/usr/bin/python -# -*- coding: utf_8 -*- - - # Autopsy Forensic Browser - # - # Copyright 2013 Basis Technology Corp. - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. -from tskdbdiff import TskDbDiff, TskDbDiffException -import codecs -import datetime -import logging -import os -import re -import shutil -import socket -import sqlite3 -import subprocess -import sys -from sys import platform as _platform -import time -import traceback -import xml -from time import localtime, strftime -from xml.dom.minidom import parse, parseString -import smtplib -from email.mime.image import MIMEImage -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -import re -import zipfile -import zlib -import Emailer -import srcupdater -from regression_utils import * - -# -# Please read me... -# -# This is the regression testing Python script. -# It uses an ant command to run build.xml for RegressionTest.java -# -# The code is cleanly sectioned and commented. -# Please follow the current formatting. -# It is a long and potentially confusing script. -# -# Variable, function, and class names are written in Python conventions: -# this_is_a_variable this_is_a_function() ThisIsAClass -# -# - - -# Data Definitions: -# -# pathto_X: A path to type X.
-# ConfigFile: An XML file formatted according to the template in myconfig.xml -# ParsedConfig: A dom object that represents a ConfigFile -# SQLCursor: A cursor recieved from a connection to an SQL database -# Nat: A Natural Number -# Image: An image -# - -# Enumeration of database types used for the simplification of generating database paths -DBType = enum('OUTPUT', 'GOLD', 'BACKUP') - -# Common filename of the output and gold databases (although they are in different directories -DB_FILENAME = "autopsy.db" - -# Backup database filename -BACKUP_DB_FILENAME = "autopsy_backup.db" - -# TODO: Double check this purpose statement -# Folder name for gold standard database testing -AUTOPSY_TEST_CASE = "AutopsyTestCase" - -# TODO: Double check this purpose statement -# The filename of the log to store error messages -COMMON_LOG = "AutopsyErrors.txt" - -Day = 0 - -#----------------------# -# Main # -#----------------------# -def main(): - """Parse the command-line arguments, create the configuration, and run the tests.""" - args = Args() - parse_result = args.parse() - test_config = TestConfiguration(args) - # The arguments were given wrong: - if not parse_result: - return - if(not args.fr): - antin = ["ant"] - antin.append("-f") - antin.append(os.path.join("..","..","build.xml")) - antin.append("test-download-imgs") - if SYS is OS.CYGWIN: - subprocess.call(antin) - elif SYS is OS.WIN: - theproc = subprocess.Popen(antin, shell = True, stdout=subprocess.PIPE) - theproc.communicate() - # Otherwise test away! - TestRunner.run_tests(test_config) - - -class TestRunner(object): - """A collection of functions to run the regression tests.""" - - def run_tests(test_config): - """Run the tests specified by the main TestConfiguration. - - Executes the AutopsyIngest for each image and dispatches the results based on - the mode (rebuild or testing) - """ - test_data_list = [ TestData(image, test_config) for image in test_config.images ] - - Reports.html_add_images(test_config.html_log, test_config.images) - - logres =[] - for test_data in test_data_list: - Errors.clear_print_logs() - Errors.set_testing_phase(test_data.image) - if not (test_config.args.rebuild or os.path.exists(test_data.gold_archive)): - msg = "Gold standard doesn't exist, skipping image:" - Errors.print_error(msg) - Errors.print_error(test_data.gold_archive) - continue - TestRunner._run_autopsy_ingest(test_data) - - if test_config.args.rebuild: - TestRunner.rebuild(test_data) - else: - logres.append(TestRunner._run_test(test_data)) - test_data.printout = Errors.printout - test_data.printerror = Errors.printerror - - Reports.write_html_foot(test_config.html_log) - # TODO: move this elsewhere - if (len(logres)>0): - for lm in logres: - for ln in lm: - Errors.add_email_msg(ln) - - # TODO: possibly worth putting this in a sub method - if all([ test_data.overall_passed for test_data in test_data_list ]): - Errors.add_email_msg("All images passed.\n") - else: - msg = "The following images failed:\n" - for test_data in test_data_list: - if not test_data.overall_passed: - msg += "\t" + test_data.image + "\n" - Errors.add_email_msg(msg) - html = open(test_config.html_log) - Errors.add_email_attachment(html.name) - html.close() - - if test_config.email_enabled: - Emailer.send_email(test_config.mail_to, test_config.mail_server, - test_config.mail_subject, Errors.email_body, Errors.email_attachs) - - def _run_autopsy_ingest(test_data): - """Run Autopsy ingest for the image in the given TestData. 
- - Also generates the necessary logs for rebuilding or diff. - - Args: - test_data: the TestData to run the ingest on. - """ - if image_type(test_data.image_file) == IMGTYPE.UNKNOWN: - Errors.print_error("Error: Image type is unrecognized:") - Errors.print_error(test_data.image_file + "\n") - return - - logging.debug("--------------------") - logging.debug(test_data.image_name) - logging.debug("--------------------") - TestRunner._run_ant(test_data) - time.sleep(2) # Give everything a second to process - - try: - # Dump the database before we diff or use it for rebuild - TskDbDiff.dump_output_db(test_data.get_db_path(DBType.OUTPUT), test_data.get_db_dump_path(DBType.OUTPUT), - test_data.get_sorted_data_path(DBType.OUTPUT)) - except sqlite3.OperationalError as e: - print("Ingest did not run properly.", - "Make sure no other instances of Autopsy are open and try again.") - sys.exit() - - # merges logs into a single log for later diff / rebuild - copy_logs(test_data) - Logs.generate_log_data(test_data) - - TestRunner._handle_solr(test_data) - TestRunner._handle_exception(test_data) - - #TODO: figure out return type of _run_test (logres) - def _run_test(test_data): - """Compare the results of the output to the gold standard. - - Args: - test_data: the TestData - - Returns: - logres? - """ - TestRunner._extract_gold(test_data) - - # Look for core exceptions - # @@@ Should be moved to TestResultsDiffer, but it didn't know about logres -- need to look into that - logres = Logs.search_common_log("TskCoreException", test_data) - - TestResultsDiffer.run_diff(test_data) - print("Html report passed: ", test_data.html_report_passed) - print("Errors diff passed: ", test_data.errors_diff_passed) - print("DB diff passed: ", test_data.db_diff_passed) - test_data.overall_passed = (test_data.html_report_passed and - test_data.errors_diff_passed and test_data.db_diff_passed) - - Reports.generate_reports(test_data) - if(not test_data.overall_passed): - Errors.add_email_attachment(test_data.common_log_path) - return logres - - def _extract_gold(test_data): - """Extract gold archive file to output/gold/tmp/ - - Args: - test_data: the TestData - """ - extrctr = zipfile.ZipFile(test_data.gold_archive, 'r', compression=zipfile.ZIP_DEFLATED) - extrctr.extractall(test_data.main_config.gold) - extrctr.close - time.sleep(2) - - def _handle_solr(test_data): - """Clean up SOLR index if in keep mode (-k). - - Args: - test_data: the TestData - """ - if not test_data.main_config.args.keep: - if clear_dir(test_data.solr_index): - print_report([], "DELETE SOLR INDEX", "Solr index deleted.") - else: - print_report([], "KEEP SOLR INDEX", "Solr index has been kept.") - - def _handle_exception(test_data): - """If running in exception mode, print exceptions to log. - - Args: - test_data: the TestData - """ - if test_data.main_config.args.exception: - exceptions = search_logs(test_data.main_config.args.exception_string, test_data) - okay = ("No warnings or exceptions found containing text '" + - test_data.main_config.args.exception_string + "'.") - print_report(exceptions, "EXCEPTION", okay) - - def rebuild(test_data): - """Rebuild the gold standard with the given TestData. - - Copies the test-generated database and html report files into the gold directory. 
-        """
-        test_config = test_data.main_config
-        # Errors to print
-        errors = []
-        # Delete the current gold standards
-        gold_dir = test_config.img_gold
-        clear_dir(test_config.img_gold)
-        tmpdir = make_path(gold_dir, test_data.image_name)
-        dbinpth = test_data.get_db_path(DBType.OUTPUT)
-        dboutpth = make_path(tmpdir, DB_FILENAME)
-        dataoutpth = make_path(tmpdir, test_data.image_name + "SortedData.txt")
-        dbdumpinpth = test_data.get_db_dump_path(DBType.OUTPUT)
-        dbdumpoutpth = make_path(tmpdir, test_data.image_name + "DBDump.txt")
-        if not os.path.exists(test_config.img_gold):
-            os.makedirs(test_config.img_gold)
-        if not os.path.exists(tmpdir):
-            os.makedirs(tmpdir)
-        try:
-            shutil.copy(dbinpth, dboutpth)
-            if file_exists(test_data.get_sorted_data_path(DBType.OUTPUT)):
-                shutil.copy(test_data.get_sorted_data_path(DBType.OUTPUT), dataoutpth)
-            shutil.copy(dbdumpinpth, dbdumpoutpth)
-            error_pth = make_path(tmpdir, test_data.image_name+"SortedErrors.txt")
-            shutil.copy(test_data.sorted_log, error_pth)
-        except IOError as e:
-            Errors.print_error(str(e))
-            Errors.add_email_msg("Not rebuilt properly")
-            print(str(e))
-            print(traceback.format_exc())
-        # Rebuild the HTML report
-        output_html_report_dir = test_data.get_html_report_path(DBType.OUTPUT)
-        gold_html_report_dir = make_path(tmpdir, "Report")
-
-        try:
-            shutil.copytree(output_html_report_dir, gold_html_report_dir)
-        except OSError as e:
-            errors.append(str(e))
-        except Exception as e:
-            errors.append("Error: Unknown fatal error when rebuilding the gold html report.")
-            errors.append(str(e) + "\n")
-            print(traceback.format_exc())
-        oldcwd = os.getcwd()
-        zpdir = gold_dir
-        os.chdir(zpdir)
-        os.chdir("..")
-        img_gold = "tmp"
-        img_archive = make_path(test_data.image_name+"-archive.zip")
-        comprssr = zipfile.ZipFile(img_archive, 'w', compression=zipfile.ZIP_DEFLATED)
-        TestRunner.zipdir(img_gold, comprssr)
-        comprssr.close()
-        os.chdir(oldcwd)
-        del_dir(test_config.img_gold)
-        okay = "Successfully rebuilt all gold standards."
-        print_report(errors, "REBUILDING", okay)
-
-    def zipdir(path, zip_file):
-        for root, dirs, files in os.walk(path):
-            for file in files:
-                zip_file.write(os.path.join(root, file))
-
-    def _run_ant(test_data):
-        """Construct and run the ant build command for the given TestData.
-
-        Tests Autopsy by calling RegressionTest.java via the ant build file.
-
-        Args:
-            test_data: the TestData
-        """
-        test_config = test_data.main_config
-        # Set up the directories
-        if dir_exists(test_data.output_path):
-            shutil.rmtree(test_data.output_path)
-        os.makedirs(test_data.output_path)
-        test_data.ant = ["ant"]
-        test_data.ant.append("-v")
-        test_data.ant.append("-f")
-        # case.ant.append(case.build_path)
-        test_data.ant.append(os.path.join("..","..","Testing","build.xml"))
-        test_data.ant.append("regression-test")
-        test_data.ant.append("-l")
-        test_data.ant.append(test_data.antlog_dir)
-        test_data.ant.append("-Dimg_path=" + test_data.image_file)
-        test_data.ant.append("-Dknown_bad_path=" + test_config.known_bad_path)
-        test_data.ant.append("-Dkeyword_path=" + test_config.keyword_path)
-        test_data.ant.append("-Dnsrl_path=" + test_config.nsrl_path)
-        test_data.ant.append("-Dgold_path=" + test_config.gold)
-        test_data.ant.append("-Dout_path=" +
-                             make_local_path(test_data.output_path))
-        test_data.ant.append("-Dignore_unalloc=" + "%s" % test_config.args.unallocated)
-        test_data.ant.append("-Dtest.timeout=" + str(test_config.timeout))
-
-        Errors.print_out("Ingesting Image:\n" + test_data.image_file + "\n")
-        Errors.print_out("CMD: " + " ".join(test_data.ant))
-        Errors.print_out("Starting test...\n")
-        antoutpth = make_local_path(test_data.main_config.output_dir, "antRunOutput.txt")
-        antout = open(antoutpth, "a")
-        if SYS is OS.CYGWIN:
-            subprocess.call(test_data.ant, stdout=subprocess.PIPE)
-        elif SYS is OS.WIN:
-            theproc = subprocess.Popen(test_data.ant, shell = True, stdout=subprocess.PIPE)
-            theproc.communicate()
-        antout.close()
-
-
-class TestData(object):
-    """Container for the input and output of a single image.
-
-    Represents data for the test of a single image, including path to the image,
-    database paths, etc.
-
-    Attributes:
-        main_config: the global TestConfiguration
-        ant: a listof_String, the ant command for this TestData
-        image_file: a pathto_Image, the image for this TestData
-        image: a String, the image file's name
-        image_name: a String, the image file's name with a trailing (0)
-        output_path: pathto_Dir, the output directory for this TestData
-        autopsy_data_file: a pathto_File, the IMAGE_NAMEAutopsy_data.txt file
-        warning_log: a pathto_File, the AutopsyLogs.txt file
-        antlog_dir: a pathto_File, the antlog.txt file
-        test_dbdump: a pathto_File, the database dump, IMAGENAMEDump.txt
-        common_log_path: a pathto_File, the IMAGE_NAMECOMMON_LOG file
-        sorted_log: a pathto_File, the IMAGENAMESortedErrors.txt file
-        reports_dir: a pathto_Dir, the AutopsyTestCase/Reports folder
-        gold_data_dir: a pathto_Dir, the gold standard directory
-        gold_archive: a pathto_File, the gold standard archive
-        logs_dir: a pathto_Dir, the location where autopsy logs are stored
-        solr_index: a pathto_Dir, the location of the solr index
-        html_report_passed: a boolean, did the HTML report diff pass?
-        errors_diff_passed: a boolean, did the error diff pass?
-        db_diff_passed: a boolean, did the db diff pass?
-        overall_passed: a boolean, did the test pass?
-        total_test_time: a String representation of the test duration
-        start_date: a String representation of this TestData's start date
-        end_date: a String representation of the TestData's end date
-        total_ingest_time: a String representation of the total ingest time
-        artifact_count: a Nat, the number of artifacts
-        artifact_fail: a Nat, the number of artifact failures
-        heap_space: a String representation of TODO
-        service_times: a String representation of TODO
-        autopsy_version: a String, the version of autopsy that was run
-        ingest_messages: a Nat, the number of ingest messages
-        indexed_files: a Nat, the number of files indexed during the ingest
-        indexed_chunks: a Nat, the number of chunks indexed during the ingest
-        printerror: a listof_String, the error messages printed during this TestData's test
-        printout: a listof_String, the messages printed during this TestData's test
-    """
-
-    def __init__(self, image, main_config):
-        """Init this TestData with its image and the test configuration.
-
-        Args:
-            image: the Image to be tested.
-            main_config: the global TestConfiguration.
-        """
-        # Configuration Data
-        self.main_config = main_config
-        self.ant = []
-        self.image_file = str(image)
-        # TODO: This 0 should be refactored out, but it will require rebuilding and changing of outputs.
-        self.image = get_image_name(self.image_file)
-        self.image_name = self.image + "(0)"
-        # Directory structure and files
-        self.output_path = make_path(self.main_config.output_dir, self.image_name)
-        self.autopsy_data_file = make_path(self.output_path, self.image_name + "Autopsy_data.txt")
-        self.warning_log = make_local_path(self.output_path, "AutopsyLogs.txt")
-        self.antlog_dir = make_local_path(self.output_path, "antlog.txt")
-        self.test_dbdump = make_path(self.output_path, self.image_name +
-                                     "DBDump.txt")
-        self.common_log_path = make_local_path(self.output_path, self.image_name + COMMON_LOG)
-        self.sorted_log = make_local_path(self.output_path, self.image_name + "SortedErrors.txt")
-        self.reports_dir = make_path(self.output_path, AUTOPSY_TEST_CASE, "Reports")
-        self.gold_data_dir = make_path(self.main_config.img_gold, self.image_name)
-        self.gold_archive = make_path(self.main_config.gold,
-                                      self.image_name + "-archive.zip")
-        self.logs_dir = make_path(self.output_path, "logs")
-        self.solr_index = make_path(self.output_path, AUTOPSY_TEST_CASE,
-                                    "ModuleOutput", "KeywordSearch")
-        # Results and Info
-        self.html_report_passed = False
-        self.errors_diff_passed = False
-        self.db_diff_passed = False
-        self.overall_passed = False
-        # Ingest info
-        self.total_test_time = ""
-        self.start_date = ""
-        self.end_date = ""
-        self.total_ingest_time = ""
-        self.artifact_count = 0
-        self.artifact_fail = 0
-        self.heap_space = ""
-        self.service_times = ""
-        self.autopsy_version = ""
-        self.ingest_messages = 0
-        self.indexed_files = 0
-        self.indexed_chunks = 0
-        # Error tracking
-        self.printerror = []
-        self.printout = []
-
-    def ant_to_string(self):
-        string = ""
-        for arg in self.ant:
-            string += (arg + " ")
-        return string
-
-    def get_db_path(self, db_type):
-        """Get the path to the database file that corresponds to the given DBType.
-
-        Args:
-            db_type: the DBType of the path to be generated.
-        """
-        if(db_type == DBType.GOLD):
-            db_path = make_path(self.gold_data_dir, DB_FILENAME)
-        elif(db_type == DBType.OUTPUT):
-            db_path = make_path(self.main_config.output_dir, self.image_name, AUTOPSY_TEST_CASE, DB_FILENAME)
-        else:
-            db_path = make_path(self.main_config.output_dir, self.image_name, AUTOPSY_TEST_CASE, BACKUP_DB_FILENAME)
-        return db_path
-
-    def get_html_report_path(self, html_type):
-        """Get the path to the HTML Report folder that corresponds to the given DBType.
-
-        Args:
-            html_type: the DBType of the path to be generated.
-        """
-        if(html_type == DBType.GOLD):
-            return make_path(self.gold_data_dir, "Report")
-        else:
-            # Autopsy creates an HTML report folder in the form AutopsyTestCase DATE-TIME
-            # It's impossible to get the exact time the folder was created, but the folder
-            # we are looking for is the only one in the self.reports_dir folder
-            html_path = ""
-            for fs in os.listdir(self.reports_dir):
-                html_path = make_path(self.reports_dir, fs)
-                if os.path.isdir(html_path):
-                    break
-            return make_path(html_path, os.listdir(html_path)[0])
-
-    def get_sorted_data_path(self, file_type):
-        """Get the path to the SortedData file that corresponds to the given DBType.
-
-        Args:
-            file_type: the DBType of the path to be generated
-        """
-        return self._get_path_to_file(file_type, "SortedData.txt")
-
-    def get_sorted_errors_path(self, file_type):
-        """Get the path to the SortedErrors file that corresponds to the given
-        DBType.
-
-        Args:
-            file_type: the DBType of the path to be generated
-        """
-        return self._get_path_to_file(file_type, "SortedErrors.txt")
-
-    def get_db_dump_path(self, file_type):
-        """Get the path to the DBDump file that corresponds to the given DBType.
-
-        Args:
-            file_type: the DBType of the path to be generated
-        """
-        return self._get_path_to_file(file_type, "DBDump.txt")
-
-    def _get_path_to_file(self, file_type, file_name):
-        """Get the path to the specified file with the specified type.
-
-        Args:
-            file_type: the DBType of the path to be generated
-            file_name: a String, the filename of the path to be generated
-        """
-        full_filename = self.image_name + file_name
-        if(file_type == DBType.GOLD):
-            return make_path(self.gold_data_dir, full_filename)
-        else:
-            return make_path(self.output_path, full_filename)
-
-
-class TestConfiguration(object):
-    """Container for test configuration data.
-
-    The Master Test Configuration. Encapsulates consolidated high level input from
-    config XML file and command-line arguments.
-
-    Attributes:
-        args: an Args, the command line arguments
-        output_dir: a pathto_Dir, the output directory
-        input_dir: a pathto_Dir, the input directory
-        gold: a pathto_Dir, the gold directory
-        img_gold: a pathto_Dir, the temp directory where gold images are unzipped to
-        csv: a pathto_File, the local csv file
-        global_csv: a pathto_File, the global csv file
-        html_log: a pathto_File, the global HTML log
-        known_bad_path: a pathto_File, the notable hashes index
-        keyword_path: a pathto_File, the notable keywords XML file
-        nsrl_path: a pathto_File, the NSRL hash index
-        build_path: a pathto_File, the ant build file which runs the tests
-        autopsy_version: a String, the version of Autopsy that was run
-        ingest_messages: a Nat, number of ingest messages
-        indexed_files: a Nat, the number of indexed files
-        indexed_chunks: a Nat, the number of indexed chunks
-        timer:
-        images: a listof_Image, the images to be tested
-        timeout: a Nat, the amount of time before killing the test
-        ant: a listof_String, the ant command to run the tests
-    """
-
-    def __init__(self, args):
-        """Inits TestConfiguration and loads a config file if available.
-
-        Args:
-            args: an Args, the command line arguments.
-        """
-        self.args = args
-        # Paths:
-        self.output_dir = ""
-        self.input_dir = make_local_path("..","input")
-        self.gold = make_path("..", "output", "gold")
-        self.img_gold = make_path(self.gold, 'tmp')
-        # Logs:
-        self.csv = ""
-        self.global_csv = ""
-        self.html_log = ""
-        # Ant info:
-        self.known_bad_path = make_path(self.input_dir, "notablehashes.txt-md5.idx")
-        self.keyword_path = make_path(self.input_dir, "notablekeywords.xml")
-        self.nsrl_path = make_path(self.input_dir, "nsrl.txt-md5.idx")
-        self.build_path = make_path("..", "build.xml")
-        # Infinite Testing info
-        self.timer = 0
-        self.images = []
-        # Email info
-        self.email_enabled = args.email_enabled
-        self.mail_server = ""
-        self.mail_to = ""
-        self.mail_subject = ""
-        # Set the timeout to something huge. The entire tester should never hit
-        # this limit (runs observed so far take roughly half this long), but the
-        # timeout handling is unreliable, so leave generous headroom.
-        self.timeout = 24 * 60 * 60 * 1000 * 1000
-
-        if not self.args.single:
-            self._load_config_file(self.args.config_file)
-        else:
-            self.images.append(self.args.single_file)
-        self._init_logs()
-        #self._init_imgs()
-        #self._init_build_info()
-
-
-    def _load_config_file(self, config_file):
-        """Updates this TestConfiguration's attributes from the config file.
-
-        Initializes this TestConfiguration from the XML config file given as a
-        command-line argument. Populates self.images and the optional email configuration.
-
-        Args:
-            config_file: ConfigFile - the configuration file to load
-        """
-        try:
-            parsed_config = parse(config_file)
-            if parsed_config.getElementsByTagName("indir"):
-                self.input_dir = parsed_config.getElementsByTagName("indir")[0].getAttribute("value").encode().decode("utf_8")
-            if parsed_config.getElementsByTagName("global_csv"):
-                self.global_csv = parsed_config.getElementsByTagName("global_csv")[0].getAttribute("value").encode().decode("utf_8")
-                self.global_csv = make_local_path(self.global_csv)
-            if parsed_config.getElementsByTagName("golddir"):
-                self.gold = parsed_config.getElementsByTagName("golddir")[0].getAttribute("value").encode().decode("utf_8")
-                self.img_gold = make_path(self.gold, 'tmp')
-
-            self._init_imgs(parsed_config)
-            self._init_build_info(parsed_config)
-            self._init_email_info(parsed_config)
-
-        except IOError as e:
-            msg = "There was an error loading the configuration file.\n"
-            msg += "\t" + str(e)
-            Errors.add_email_msg(msg)
-            logging.critical(traceback.format_exc())
-            print(traceback.format_exc())
-
-    def _init_logs(self):
-        """Setup output folder, logs, and reporting infrastructure."""
-        if(not dir_exists(make_path("..", "output", "results"))):
-            os.makedirs(make_path("..", "output", "results"))
-        self.output_dir = make_path("..", "output", "results", time.strftime("%Y.%m.%d-%H.%M.%S"))
-        os.makedirs(self.output_dir)
-        self.csv = make_local_path(self.output_dir, "CSV.txt")
-        self.html_log = make_path(self.output_dir, "AutopsyTestCase.html")
-        log_name = self.output_dir + "\\regression.log"
-        logging.basicConfig(filename=log_name, level=logging.DEBUG)
-
-    def _init_build_info(self, parsed_config):
-        """Initializes paths that point to information necessary to run the AutopsyIngest."""
-        build_elements = parsed_config.getElementsByTagName("build")
-        if build_elements:
-            build_element = build_elements[0]
-            build_path = build_element.getAttribute("value").encode().decode("utf_8")
-            self.build_path = build_path
-
-    def _init_imgs(self, parsed_config):
-        """Initialize the list of images to run tests on."""
-        for element in parsed_config.getElementsByTagName("image"):
-            value = element.getAttribute("value").encode().decode("utf_8")
-            print("Image in Config File: " + value)
-            if file_exists(value):
-                self.images.append(value)
-            else:
-                msg = "File: " + value + " doesn't exist"
-                Errors.print_error(msg)
-                Errors.add_email_msg(msg)
-        image_count = len(self.images)
-
-        # Sanity check to see if there are obvious gold images that we are not testing
-        gold_count = 0
-        for file in os.listdir(self.gold):
-            if file != 'tmp':
-                gold_count += 1
-
-        if (image_count > gold_count):
-            print("******Alert: There are more input images than gold standards, so some images will not be properly tested.\n")
-        elif (image_count < gold_count):
-            print("******Alert: There are more gold standards than input images, so not all gold standards will be checked.\n")
-
-    def _init_email_info(self, parsed_config):
-        """Initialize the email configuration from the parsed config file."""
-        email_elements = parsed_config.getElementsByTagName("email")
-        if email_elements:
-            mail_to = email_elements[0]
-            self.mail_to = mail_to.getAttribute("value").encode().decode("utf_8")
-        mail_server_elements = parsed_config.getElementsByTagName("mail_server")
-        if mail_server_elements:
-            mail_from = mail_server_elements[0]
-            self.mail_server = mail_from.getAttribute("value").encode().decode("utf_8")
-        subject_elements = parsed_config.getElementsByTagName("subject")
-        if subject_elements:
-            subject = subject_elements[0]
-            self.mail_subject = subject.getAttribute("value").encode().decode("utf_8")
-        if self.mail_server and self.mail_to and self.args.email_enabled:
-            self.email_enabled = True
-            print("Email will be sent to ", self.mail_to)
-        else:
-            print("No email will be sent.")
-
-
-#-------------------------------------------------#
-#     Functions relating to comparing outputs     #
-#-------------------------------------------------#
-class TestResultsDiffer(object):
-    """Compares results for a single test."""
-
-    def run_diff(test_data):
-        """Compares results for a single test.
-
-        Args:
-            test_data: the TestData to use.
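# A minimal example of the XML config file that _load_config_file and the
# _init_* helpers above expect, inferred from the tags they read (indir,
# global_csv, golddir, image, build, email, mail_server, subject). The root
# element name, paths, and addresses below are illustrative placeholders;
# myconfig.xml remains the authoritative template.
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <Properties>
#       <indir value="..\input" />
#       <golddir value="..\output\gold" />
#       <image value="..\input\example-image.img" />
#       <image value="..\input\example-image2.E01" />
#       <email value="qa@example.com" />
#       <mail_server value="smtp.example.com" />
#       <subject value="Autopsy regression test results" />
#   </Properties>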
-        """
-        try:
-            output_db = test_data.get_db_path(DBType.OUTPUT)
-            gold_db = test_data.get_db_path(DBType.GOLD)
-            output_dir = test_data.output_path
-            gold_bb_dump = test_data.get_sorted_data_path(DBType.GOLD)
-            gold_dump = test_data.get_db_dump_path(DBType.GOLD)
-            test_data.db_diff_passed = all(TskDbDiff(output_db, gold_db, output_dir=output_dir, gold_bb_dump=gold_bb_dump,
-                                           gold_dump=gold_dump).run_diff())
-
-            # Compare Exceptions
-            # replace is a function that replaces strings of digits with 'd'
-            # this is needed so dates and times will not cause the diff to fail
-            replace = lambda file: re.sub(re.compile(r"\d"), "d", file)
-            output_errors = test_data.get_sorted_errors_path(DBType.OUTPUT)
-            gold_errors = test_data.get_sorted_errors_path(DBType.GOLD)
-            passed = TestResultsDiffer._compare_text(output_errors, gold_errors,
-                                                     replace)
-            test_data.errors_diff_passed = passed
-
-            # Compare html output
-            gold_report_path = test_data.get_html_report_path(DBType.GOLD)
-            output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
-            passed = TestResultsDiffer._html_report_diff(gold_report_path,
-                                                         output_report_path)
-            test_data.html_report_passed = passed
-
-            # Clean up tmp folder
-            del_dir(test_data.gold_data_dir)
-
-        except sqlite3.OperationalError as e:
-            Errors.print_error("Tests failed while running the diff:\n")
-            Errors.print_error(str(e))
-        except TskDbDiffException as e:
-            Errors.print_error(str(e))
-        except Exception as e:
-            Errors.print_error("Tests failed due to an error, try rebuilding or creating gold standards.\n")
-            Errors.print_error(str(e) + "\n")
-            print(traceback.format_exc())
-
-    def _compare_text(output_file, gold_file, process=None):
-        """Compare two text files.
-
-        Args:
-            output_file: a pathto_File, the output text file
-            gold_file: a pathto_File, the gold text file
-            process: (optional) a function of String -> String that will be
-            called on each file's contents before the diff, if specified.
-        """
-        if(not file_exists(output_file)):
-            return False
-        output_data = codecs.open(output_file, "r", "utf_8").read()
-        gold_data = codecs.open(gold_file, "r", "utf_8").read()
-
-        if process is not None:
-            output_data = process(output_data)
-            gold_data = process(gold_data)
-
-        if (not(gold_data == output_data)):
-            diff_path = os.path.splitext(os.path.basename(output_file))[0]
-            diff_path += "-Diff.txt"
-            diff_file = codecs.open(diff_path, "wb", "utf_8")
-            dffcmdlst = ["diff", output_file, gold_file]
-            subprocess.call(dffcmdlst, stdout = diff_file)
-            Errors.add_email_attachment(diff_path)
-            msg = "There was a difference in "
-            msg += os.path.basename(output_file) + ".\n"
-            Errors.add_email_msg(msg)
-            Errors.print_error(msg)
-            return False
-        else:
-            return True
-
-    def _html_report_diff(gold_report_path, output_report_path):
-        """Compare the output and gold html reports.
-
-        Args:
-            gold_report_path: a pathto_Dir, the gold HTML report directory
-            output_report_path: a pathto_Dir, the output HTML report directory
-
-        Returns:
-            true, if the reports match, false otherwise.
-        """
-        try:
-            gold_html_files = get_files_by_ext(gold_report_path, ".html")
-            output_html_files = get_files_by_ext(output_report_path, ".html")
-
-            #ensure both reports have the same number of files and are in the same order
-            if(len(gold_html_files) != len(output_html_files)):
-                msg = "The reports did not have the same number of files. "
-                msg += "One of the reports may have been corrupted."
-                Errors.print_error(msg)
-            else:
-                gold_html_files.sort()
-                output_html_files.sort()
-
-                total = {"Gold": 0, "New": 0}
-                for gold, output in zip(gold_html_files, output_html_files):
-                    count = TestResultsDiffer._compare_report_files(gold, output)
-                    total["Gold"] += count[0]
-                    total["New"] += count[1]
-
-                okay = "The test report matches the gold report."
-                errors = ["Gold report had " + str(total["Gold"]) + " errors", "New report had " + str(total["New"]) + " errors."]
-                print_report(errors, "REPORT COMPARISON", okay)
-
-                if total["Gold"] == total["New"]:
-                    return True
-                else:
-                    Errors.print_error("The reports did not match each other.\n " + errors[0] + " and the " + errors[1])
-                    return False
-        except OSError as e:
-            Errors.print_error(str(e))
-            return False
-        except Exception as e:
-            Errors.print_error("Error: Unknown fatal error comparing reports.")
-            Errors.print_error(str(e) + "\n")
-            logging.critical(traceback.format_exc())
-            return False
-
-    def _compare_report_files(a_path, b_path):
-        """Compares the two specified report html files.
-
-        Args:
-            a_path: a pathto_File, the first html report file
-            b_path: a pathto_File, the second html report file
-
-        Returns:
-            a tuple of (Nat, Nat), which represent the length of each
-            unordered list in the html report files, or (0, 0) if the
-            lengths are the same.
-        """
-        a_file = open(a_path)
-        b_file = open(b_path)
-        a = a_file.read()
-        b = b_file.read()
-        a = a[a.find("<ul>"):]
-        b = b[b.find("<ul>"):]
-
-        a_list = TestResultsDiffer._split(a, 50)
-        b_list = TestResultsDiffer._split(b, 50)
-        if not len(a_list) == len(b_list):
-            ex = (len(a_list), len(b_list))
-            return ex
-        else:
-            return (0, 0)
-
-    # Split a string into an array of strings of the given size
-    def _split(input, size):
-        return [input[start:start+size] for start in range(0, len(input), size)]
-
-
-class Reports(object):
-    def generate_reports(test_data):
-        """Generate the reports for a single test
-
-        Args:
-            test_data: the TestData
-        """
-        Reports._generate_html(test_data)
-        if test_data.main_config.global_csv:
-            Reports._generate_csv(test_data.main_config.global_csv, test_data)
-        else:
-            Reports._generate_csv(test_data.main_config.csv, test_data)
-
-    def _generate_html(test_data):
-        """Generate the HTML log file."""
-        # If the file doesn't exist yet, this is the first test_config to run for
-        # this test, so we need to make the start of the html log
-        html_log = test_data.main_config.html_log
-        if not file_exists(html_log):
-            Reports.write_html_head(html_log)
-        with open(html_log, "a") as html:
-            # The image title
-            title = "<h1><a name='" + test_data.image_name + "'>" + test_data.image_name + " \
-                    <span>tested on <strong>" + socket.gethostname() + "</strong></span></a></h1>\
-                    <h2 align='center'>\
-                    <a href='#" + test_data.image_name + "-errors'>Errors and Warnings</a> |\
-                    <a href='#" + test_data.image_name + "-info'>Information</a> |\
-                    <a href='#" + test_data.image_name + "-general'>General Output</a> |\
-                    <a href='#" + test_data.image_name + "-logs'>Logs</a>\
-                    </h2>"
-            # The script errors found
-            if not test_data.overall_passed:
-                ids = 'errors1'
-            else:
-                ids = 'errors'
-            errors = "<div id='" + ids + "'>\
-                     <h2><a name='" + test_data.image_name + "-errors'>Errors and Warnings</a></h2>\
-                     <hr color='#FF0000'>"
-            # For each error we have logged in the test_config
-            for error in test_data.printerror:
-                # Replace < and > to avoid any html display errors
-                errors += "<p>" + error.replace("<", "&lt;").replace(">", "&gt;") + "</p>"
-                # If there is a \n, we probably want a <br /> in the html
-                if "\n" in error:
-                    errors += "<br />"
-            errors += "</div>"
-
-            # Links to the logs
-            logs = "<div id='logs'>\
-                   <h2><a name='" + test_data.image_name + "-logs'>Logs</a></h2>\
-                   <hr color='#282828'>"
-            logs_path = test_data.logs_dir
-            for file in os.listdir(logs_path):
-                logs += "<p><a href='file:\\" + make_path(logs_path, file) + "' target='_blank'>" + file + "</a></p>"
-            logs += "</div>"
-
-            # All the testing information
-            info = "<div id='info'>\
-                   <h2><a name='" + test_data.image_name + "-info'>Information</a></h2>\
-                   <hr color='#282828'>\
-                   <table cellspacing='5px'>"
-            # The individual elements
-            info += "<tr><td>Image Path:</td>"
-            info += "<td>" + test_data.image_file + "</td></tr>"
-            info += "<tr><td>Image Name:</td>"
-            info += "<td>" + test_data.image_name + "</td></tr>"
-            info += "<tr><td>test_config Output Directory:</td>"
-            info += "<td>" + test_data.main_config.output_dir + "</td></tr>"
-            info += "<tr><td>Autopsy Version:</td>"
-            info += "<td>" + test_data.autopsy_version + "</td></tr>"
-            info += "<tr><td>Heap Space:</td>"
-            info += "<td>" + test_data.heap_space + "</td></tr>"
-            info += "<tr><td>Test Start Date:</td>"
-            info += "<td>" + test_data.start_date + "</td></tr>"
-            info += "<tr><td>Test End Date:</td>"
-            info += "<td>" + test_data.end_date + "</td></tr>"
-            info += "<tr><td>Total Test Time:</td>"
-            info += "<td>" + test_data.total_test_time + "</td></tr>"
-            info += "<tr><td>Total Ingest Time:</td>"
-            info += "<td>" + test_data.total_ingest_time + "</td></tr>"
-            info += "<tr><td>Exceptions Count:</td>"
-            info += "<td>" + str(len(get_exceptions(test_data))) + "</td></tr>"
-            info += "<tr><td>Autopsy OutOfMemoryExceptions:</td>"
-            info += "<td>" + str(len(search_logs("OutOfMemoryException", test_data))) + "</td></tr>"
-            info += "<tr><td>Autopsy OutOfMemoryErrors:</td>"
-            info += "<td>" + str(len(search_logs("OutOfMemoryError", test_data))) + "</td></tr>"
-            info += "<tr><td>Tika OutOfMemoryErrors/Exceptions:</td>"
-            info += "<td>" + str(Reports._get_num_memory_errors("tika", test_data)) + "</td></tr>"
-            info += "<tr><td>Solr OutOfMemoryErrors/Exceptions:</td>"
-            info += "<td>" + str(Reports._get_num_memory_errors("solr", test_data)) + "</td></tr>"
-            info += "<tr><td>TskCoreExceptions:</td>"
-            info += "<td>" + str(len(search_log_set("autopsy", "TskCoreException", test_data))) + "</td></tr>"
-            info += "<tr><td>TskDataExceptions:</td>"
-            info += "<td>" + str(len(search_log_set("autopsy", "TskDataException", test_data))) + "</td></tr>"
-            info += "<tr><td>Ingest Messages Count:</td>"
-            info += "<td>" + str(test_data.ingest_messages) + "</td></tr>"
-            info += "<tr><td>Indexed Files Count:</td>"
-            info += "<td>" + str(test_data.indexed_files) + "</td></tr>"
-            info += "<tr><td>Indexed File Chunks Count:</td>"
-            info += "<td>" + str(test_data.indexed_chunks) + "</td></tr>"
-            info += "<tr><td>Out Of Disk Space:\
-                    <p style='font-size: 11px;'>(will skew other test results)</p></td>"
-            info += "<td>" + str(len(search_log_set("autopsy", "Stopping ingest due to low disk space on disk", test_data))) + "</td></tr>"
-#            info += "<tr><td>TSK Objects Count:</td>"
-#            info += "<td>" + str(test_data.db_diff_results.output_objs) + "</td></tr>"
-#            info += "<tr><td>Artifacts Count:</td>"
-#            info += "<td>" + str(test_data.db_diff_results.output_artifacts)+ "</td></tr>"
-#            info += "<tr><td>Attributes Count:</td>"
-#            info += "<td>" + str(test_data.db_diff_results.output_attrs) + "</td></tr>"
-            info += "</table>\
-                    </div>"
-            # For all the general print statements in the test_config
-            output = "<div id='general'>\
-                    <h2><a name='" + test_data.image_name + "-general'>General Output</a></h2>\
-                    <hr color='#282828'>"
-            # For each printout in the test_config's list
-            for out in test_data.printout:
-                output += "<p>" + out + "</p>"
-                # If there was a \n it probably means we want a <br /> in the html
-                if "\n" in out:
-                    output += "<br />"
-            output += "</div>"
-
-            html.write(title)
-            html.write(errors)
-            html.write(info)
-            html.write(logs)
-            html.write(output)
-
-    def write_html_head(html_log):
-        """Write the top of the HTML log file.
-
-        Args:
-            html_log: a pathto_File, the global HTML log
-        """
-        with open(str(html_log), "a") as html:
-            head = "<html>\
-                    <head>\
-                    <title>AutopsyTesttest_config Output</title>\
-                    </head>\
-                    <style type='text/css'>\
-                    body { font-family: 'Courier New'; font-size: 12px; }\
-                    h1 { background: #444; margin: 0px auto; padding: 0px; color: #FFF; border: 1px solid #000; font-family: Tahoma; text-align: center; }\
-                    h1 span { font-size: 12px; font-weight: 100; }\
-                    h2 { font-family: Tahoma; padding: 0px; margin: 0px; }\
-                    hr { width: 100%; height: 1px; border: none; margin-top: 10px; margin-bottom: 10px; }\
-                    #errors { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\
-                    #errors1 { background: #CC0000; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\
-                    #info { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\
-                    #general { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\
-                    #logs { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\
-                    #errors p, #info p, #general p, #logs p { padding: 0px; margin: 0px; margin-left: 5px; }\
-                    #info table td { color: #282828; font-size: 12px; min-width: 225px; }\
-                    #logs a { color: #282828; }\
-                    </style>\
-                    <body>"
-            html.write(head)
-
-    def write_html_foot(html_log):
-        """Write the bottom of the HTML log file.
-
-        Args:
-            html_log: a pathto_File, the global HTML log
-        """
-        with open(html_log, "a") as html:
-            foot = "</body></html>"
-            html.write(foot)
-
-    def html_add_images(html_log, full_image_names):
-        """Add all the image names to the HTML log.
-
-        Args:
-            full_image_names: a listof_String, each representing an image name
-            html_log: a pathto_File, the global HTML log
-        """
-        # If the file doesn't exist yet, this is the first test_config to run for
-        # this test, so we need to make the start of the html log
-        if not file_exists(html_log):
-            Reports.write_html_head(html_log)
-        with open(html_log, "a") as html:
-            links = []
-            for full_name in full_image_names:
-                name = get_image_name(full_name)
-                links.append("<a href='#" + name + "(0)'>" + name + "</a>")
-            html.write("<p align='center'>" + (" | ".join(links)) + "</p>")
-
-    def _generate_csv(csv_path, test_data):
-        """Generate the CSV log file"""
-        # If the CSV file hasn't already been generated, this is the
-        # first run, and we need to add the column names
-        if not file_exists(csv_path):
-            Reports.csv_header(csv_path)
-        # Now add on the fields to a new row
-        with open(csv_path, "a") as csv:
-            # Variables that need to be written
-            vars = []
-            vars.append( test_data.image_file )
-            vars.append( test_data.image_name )
-            vars.append( test_data.main_config.output_dir )
-            vars.append( socket.gethostname() )
-            vars.append( test_data.autopsy_version )
-            vars.append( test_data.heap_space )
-            vars.append( test_data.start_date )
-            vars.append( test_data.end_date )
-            vars.append( test_data.total_test_time )
-            vars.append( test_data.total_ingest_time )
-            vars.append( test_data.service_times )
-            vars.append( str(len(get_exceptions(test_data))) )
-            vars.append( str(Reports._get_num_memory_errors("autopsy", test_data)) )
-            vars.append( str(Reports._get_num_memory_errors("tika", test_data)) )
-            vars.append( str(Reports._get_num_memory_errors("solr", test_data)) )
-            vars.append( str(len(search_log_set("autopsy", "TskCoreException", test_data))) )
-            vars.append( str(len(search_log_set("autopsy", "TskDataException", test_data))) )
-            vars.append( str(test_data.ingest_messages) )
-            vars.append( str(test_data.indexed_files) )
-            vars.append( str(test_data.indexed_chunks) )
-            vars.append( str(len(search_log_set("autopsy", "Stopping ingest due to low disk space on disk", test_data))) )
-#            vars.append( str(test_data.db_diff_results.output_objs) )
-#            vars.append( str(test_data.db_diff_results.output_artifacts) )
-#            vars.append( str(test_data.db_diff_results.output_objs) )
-            vars.append( make_local_path("gold", test_data.image_name, DB_FILENAME) )
-#            vars.append( test_data.db_diff_results.get_artifact_comparison() )
-#            vars.append( test_data.db_diff_results.get_attribute_comparison() )
-            vars.append( make_local_path("gold", test_data.image_name, "standard.html") )
-            vars.append( str(test_data.html_report_passed) )
-            vars.append( test_data.ant_to_string() )
-            # Join the fields together with a "|"
-            output = "|".join(vars)
-            output += "\n"
-            # Write to the log!
-            csv.write(output)
-
-    def csv_header(csv_path):
-        """Generate the CSV column names."""
-        with open(csv_path, "w") as csv:
-            titles = []
-            titles.append("Image Path")
-            titles.append("Image Name")
-            titles.append("Output test_config Directory")
-            titles.append("Host Name")
-            titles.append("Autopsy Version")
-            titles.append("Heap Space Setting")
-            titles.append("Test Start Date")
-            titles.append("Test End Date")
-            titles.append("Total Test Time")
-            titles.append("Total Ingest Time")
-            titles.append("Service Times")
-            titles.append("Autopsy Exceptions")
-            titles.append("Autopsy OutOfMemoryErrors/Exceptions")
-            titles.append("Tika OutOfMemoryErrors/Exceptions")
-            titles.append("Solr OutOfMemoryErrors/Exceptions")
-            titles.append("TskCoreExceptions")
-            titles.append("TskDataExceptions")
-            titles.append("Ingest Messages Count")
-            titles.append("Indexed Files Count")
-            titles.append("Indexed File Chunks Count")
-            titles.append("Out Of Disk Space")
-#            titles.append("Tsk Objects Count")
-#            titles.append("Artifacts Count")
-#            titles.append("Attributes Count")
-            titles.append("Gold Database Name")
-#            titles.append("Artifacts Comparison")
-#            titles.append("Attributes Comparison")
-            titles.append("Gold Report Name")
-            titles.append("Report Comparison")
-            titles.append("Ant Command Line")
-            output = "|".join(titles)
-            output += "\n"
-            csv.write(output)
-
-    def _get_num_memory_errors(type, test_data):
-        """Get the number of OutOfMemory errors and Exceptions.
-
-        Args:
-            type: a String representing the type of log to check.
-            test_data: the TestData to examine.
-        """
-        return (len(search_log_set(type, "OutOfMemoryError", test_data)) +
-                len(search_log_set(type, "OutOfMemoryException", test_data)))
-
-class Logs(object):
-
-    def generate_log_data(test_data):
-        """Find and handle relevant data from the Autopsy logs.
-
-        Args:
-            test_data: the TestData whose logs to examine
-        """
-        Logs._generate_common_log(test_data)
-        try:
-            Logs._fill_ingest_data(test_data)
-        except Exception as e:
-            Errors.print_error("Error: Unknown fatal error when filling test_config data.")
-            Errors.print_error(str(e) + "\n")
-            logging.critical(traceback.format_exc())
-        # If running in verbose mode (-v)
-        if test_data.main_config.args.verbose:
-            errors = Logs._report_all_errors(test_data)
-            okay = "No warnings or errors in any log files."
-            print_report(errors, "VERBOSE", okay)
-
-    def _generate_common_log(test_data):
-        """Generate the common log, the log of all exceptions and warnings from
-        each log file generated by Autopsy.
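# The log merge below ends by shelling out to an external "sort" binary, which
# is assumed to be on the PATH (it is under Cygwin and common GNU tool installs
# on Windows). A rough pure-Python sketch of that one step, shown only for
# clarity; the collation order may differ from the external tool:
def sort_file_lines(in_path, out_path):
    """Write the lines of in_path to out_path in sorted order."""
    with codecs.open(in_path, "r", "utf_8") as infile:
        lines = sorted(infile.readlines())
    with codecs.open(out_path, "w", "utf_8") as outfile:
        outfile.writelines(lines)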
-
-        Args:
-            test_data: the TestData to generate a log for
-        """
-        try:
-            logs_path = test_data.logs_dir
-            common_log = codecs.open(test_data.common_log_path, "w", "utf_8")
-            warning_log = codecs.open(test_data.warning_log, "w", "utf_8")
-            common_log.write("--------------------------------------------------\n")
-            common_log.write(test_data.image_name + "\n")
-            common_log.write("--------------------------------------------------\n")
-            rep_path = make_local_path(test_data.main_config.output_dir)
-            rep_path = rep_path.replace("\\\\", "\\")
-            for file in os.listdir(logs_path):
-                log = codecs.open(make_path(logs_path, file), "r", "utf_8")
-                for line in log:
-                    line = line.replace(rep_path, "test_data")
-                    if line.startswith("Exception"):
-                        common_log.write(file + ": " + line)
-                    elif line.startswith("Error"):
-                        common_log.write(file + ": " + line)
-                    elif line.startswith("SEVERE"):
-                        common_log.write(file + ": " + line)
-                    else:
-                        warning_log.write(file + ": " + line)
-                log.close()
-            common_log.write("\n")
-            common_log.close()
-            print(test_data.sorted_log)
-            srtcmdlst = ["sort", test_data.common_log_path, "-o", test_data.sorted_log]
-            subprocess.call(srtcmdlst)
-        except (OSError, IOError) as e:
-            Errors.print_error("Error: Unable to generate the common log.")
-            Errors.print_error(str(e) + "\n")
-            Errors.print_error(traceback.format_exc())
-            logging.critical(traceback.format_exc())
-
-    def _fill_ingest_data(test_data):
-        """Fill the TestData's variables that require the log files.
-
-        Args:
-            test_data: the TestData to modify
-        """
-        try:
-            # Open autopsy.log.0
-            log_path = make_path(test_data.logs_dir, "autopsy.log.0")
-            log = open(log_path)
-
-            # Set the TestData start time based off the first line of autopsy.log.0
-            # *** If logging time format ever changes this will break ***
-            test_data.start_date = log.readline().split(" org.")[0]
-
-            # Set the test_data ending time based off the "create" time (when the file was copied)
-            test_data.end_date = time.ctime(os.path.getmtime(log_path))
-        except IOError as e:
-            Errors.print_error("Error: Unable to open autopsy.log.0.")
-            Errors.print_error(str(e) + "\n")
-            logging.warning(traceback.format_exc())
-        # Start date must look like: "Jul 16, 2012 12:57:53 PM"
-        # End date must look like: "Mon Jul 16 13:02:42 2012"
-        # *** If logging time format ever changes this will break ***
-        start = datetime.datetime.strptime(test_data.start_date, "%b %d, %Y %I:%M:%S %p")
-        end = datetime.datetime.strptime(test_data.end_date, "%a %b %d %H:%M:%S %Y")
-        test_data.total_test_time = str(end - start)
-
-        try:
-            # Set Autopsy version, heap space, ingest time, and service times
-
-            version_line = search_logs("INFO: Application name: Autopsy, version:", test_data)[0]
-            test_data.autopsy_version = get_word_at(version_line, 5).rstrip(",")
-
-            test_data.heap_space = search_logs("Heap memory usage:", test_data)[0].rstrip().split(": ")[1]
-
-            ingest_line = search_logs("Ingest (including enqueue)", test_data)[0]
-            test_data.total_ingest_time = get_word_at(ingest_line, 6).rstrip()
-
-            message_line = search_log_set("autopsy", "Ingest messages count:", test_data)[0]
-            test_data.ingest_messages = int(message_line.rstrip().split(": ")[2])
-
-            files_line = search_log_set("autopsy", "Indexed files count:", test_data)[0]
-            test_data.indexed_files = int(files_line.rstrip().split(": ")[2])
-
-            chunks_line = search_log_set("autopsy", "Indexed file chunks count:", test_data)[0]
-            test_data.indexed_chunks = int(chunks_line.rstrip().split(": ")[2])
-        except (OSError, IOError) as e:
-            Errors.print_error("Error: Unable to find the required information to fill test_config data.")
-            Errors.print_error(str(e) + "\n")
-            logging.critical(traceback.format_exc())
-            print(traceback.format_exc())
-        try:
-            service_lines = search_log("autopsy.log.0", "to process()", test_data)
-            service_list = []
-            for line in service_lines:
-                words = line.split(" ")
-                # Kind of forcing our way into getting this data
-                # If this format changes, the tester will break
-                i = words.index("secs.")
-                times = words[i-4] + " "
-                times += words[i-3] + " "
-                times += words[i-2] + " "
-                times += words[i-1] + " "
-                times += words[i]
-                service_list.append(times)
-            test_data.service_times = "; ".join(service_list)
-        except (OSError, IOError) as e:
-            Errors.print_error("Error: Unknown fatal error when finding service times.")
-            Errors.print_error(str(e) + "\n")
-            logging.critical(traceback.format_exc())
-
-    def _report_all_errors(test_data):
-        """Generate a list of all the errors found in the common log.
-
-        Args:
-            test_data: the TestData whose logs to examine
-
-        Returns:
-            a listof_String, the errors found in the common log
-        """
-        try:
-            return get_warnings(test_data) + get_exceptions(test_data)
-        except (OSError, IOError) as e:
-            Errors.print_error("Error: Unknown fatal error when reporting all errors.")
-            Errors.print_error(str(e) + "\n")
-            logging.warning(traceback.format_exc())
-
-    def search_common_log(string, test_data):
-        """Search the common log for any instances of a given string.
-
-        Args:
-            string: the String to search for.
-            test_data: the TestData that holds the log to search.
-
-        Returns:
-            a listof_String, all the lines that the string is found on
-        """
-        results = []
-        log = codecs.open(test_data.common_log_path, "r", "utf_8")
-        for line in log:
-            if string in line:
-                results.append(line)
-        log.close()
-        return results
-
-
-def print_report(errors, name, okay):
-    """Print a report with the specified information.
-
-    Args:
-        errors: a listof_String, the errors to report.
-        name: a String, the name of the report.
-        okay: the String to print when there are no errors.
-    """
-    if errors:
-        Errors.print_error("--------< " + name + " >----------")
-        for error in errors:
-            Errors.print_error(str(error))
-        Errors.print_error("--------< / " + name + " >--------\n")
-    else:
-        Errors.print_out("-----------------------------------------------------------------")
-        Errors.print_out("< " + name + " - " + okay + " />")
-        Errors.print_out("-----------------------------------------------------------------\n")
-
-
-def get_exceptions(test_data):
-    """Get a list of the exceptions in the autopsy logs.
-
-    Args:
-        test_data: the TestData to use to find the exceptions.
-    Returns:
-        a listof_String, the exceptions found in the logs.
-    """
-    exceptions = []
-    logs_path = test_data.logs_dir
-    for file in os.listdir(logs_path):
-        if "autopsy.log" in file:
-            log = codecs.open(make_path(logs_path, file), "r", "utf_8")
-            ex = re.compile(r"\SException")
-            er = re.compile(r"\SError")
-            for line in log:
-                if ex.search(line) or er.search(line):
-                    exceptions.append(line)
-            log.close()
-    return exceptions
-
-def get_warnings(test_data):
-    """Get a list of the warnings listed in the common log.
-
-    Args:
-        test_data: the TestData to use to find the warnings
-
-    Returns:
-        listof_String, the warnings found.
-    """
-    warnings = []
-    common_log = codecs.open(test_data.warning_log, "r", "utf_8")
-    for line in common_log:
-        if "warning" in line.lower():
-            warnings.append(line)
-    common_log.close()
-    return warnings
-
-def copy_logs(test_data):
-    """Copy the Autopsy generated logs to output directory.
-
-    Args:
-        test_data: the TestData whose logs will be copied
-    """
-    try:
-        log_dir = os.path.join("..", "..", "Testing","build","test","qa-functional","work","userdir0","var","log")
-        shutil.copytree(log_dir, test_data.logs_dir)
-    except OSError as e:
-        Errors.print_error("Error: Failed to copy the logs.")
-        Errors.print_error(str(e) + "\n")
-        logging.warning(traceback.format_exc())
-
-def setDay():
-    global Day
-    Day = int(strftime("%d", localtime()))
-
-def getLastDay():
-    return Day
-
-def getDay():
-    return int(strftime("%d", localtime()))
-
-def newDay():
-    return getLastDay() != getDay()
-
-#------------------------------------------------------------#
-# Exception classes to manage "acceptable" thrown exceptions #
-#        versus unexpected and fatal exceptions              #
-#------------------------------------------------------------#
-
-class FileNotFoundException(Exception):
-    """
-    If a file cannot be found by one of the helper functions,
-    they will throw a FileNotFoundException unless the purpose
-    is to return False.
-    """
-    def __init__(self, file):
-        self.file = file
-        self.strerror = "FileNotFoundException: " + file
-
-    def print_error(self):
-        Errors.print_error("Error: File could not be found at:")
-        Errors.print_error(self.file + "\n")
-
-    def error(self):
-        error = "Error: File could not be found at:\n" + self.file + "\n"
-        return error
-
-class DirNotFoundException(Exception):
-    """
-    If a directory cannot be found by a helper function,
-    it will throw this exception
-    """
-    def __init__(self, dir):
-        self.dir = dir
-        self.strerror = "DirNotFoundException: " + dir
-
-    def print_error(self):
-        Errors.print_error("Error: Directory could not be found at:")
-        Errors.print_error(self.dir + "\n")
-
-    def error(self):
-        error = "Error: Directory could not be found at:\n" + self.dir + "\n"
-        return error
-
-
-class Errors:
-    """A class used to manage error reporting.
-
-    Attributes:
-        printout: a listof_String, the non-error messages that were printed
-        printerror: a listof_String, the error messages that were printed
-        email_body: a String, the body of the report email
-        email_msg_prefix: a String, the prefix for lines added to the email
-        email_attachs: a listof_pathto_File, the files to be attached to the
-        report email
-    """
-    printout = []
-    printerror = []
-    email_body = ""
-    email_msg_prefix = "Configuration"
-    email_attachs = []
-
-    def set_testing_phase(image_name):
-        """Change the email message prefix to be the given testing phase.
-
-        Args:
-            image_name: a String, representing the current image being tested
-        """
-        Errors.email_msg_prefix = image_name
-
-    def print_out(msg):
-        """Print out an informational message.
-
-        Args:
-            msg: a String, the message to be printed
-        """
-        print(msg)
-        Errors.printout.append(msg)
-
-    def print_error(msg):
-        """Print out an error message.
-
-        Args:
-            msg: a String, the error message to be printed.
-        """
-        print(msg)
-        Errors.printerror.append(msg)
-
-    def clear_print_logs():
-        """Reset the image-specific attributes of the Errors class."""
-        Errors.printout = []
-        Errors.printerror = []
-
-    def add_email_msg(msg):
-        """Add the given message to the body of the report email.
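# A short sketch of how this error-reporting plumbing is driven for each image
# (the real call sites are in TestRunner.run_tests above; the image name here is
# an illustrative placeholder):
#
#     Errors.clear_print_logs()                   # reset the per-image buffers
#     Errors.set_testing_phase("example-image")   # prefix email lines with the image
#     Errors.print_out("Starting test...\n")      # recorded in Errors.printout
#     Errors.print_error("Gold standard doesn't exist, skipping image:")
#     Errors.add_email_msg("All images passed.\n")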
- - Args: - msg: a String, the message to be added to the email - """ - Errors.email_body += Errors.email_msg_prefix + ":" + msg - - def add_email_attachment(path): - """Add the given file to be an attachment for the report email - - Args: - file: a pathto_File, the file to add - """ - Errors.email_attachs.append(path) - - -class DiffResults(object): - """Container for the results of the database diff tests. - - Stores artifact, object, and attribute counts and comparisons generated by - TskDbDiff. - - Attributes: - gold_attrs: a Nat, the number of gold attributes - output_attrs: a Nat, the number of output attributes - gold_objs: a Nat, the number of gold objects - output_objs: a Nat, the number of output objects - artifact_comp: a listof_String, describing the differences - attribute_comp: a listof_String, describing the differences - passed: a boolean, did the diff pass? - """ - def __init__(self, tsk_diff): - """Inits a DiffResults - - Args: - tsk_diff: a TskDBDiff - """ - self.gold_attrs = tsk_diff.gold_attributes - self.output_attrs = tsk_diff.autopsy_attributes - self.gold_objs = tsk_diff.gold_objects - self.output_objs = tsk_diff.autopsy_objects - self.artifact_comp = tsk_diff.artifact_comparison - self.attribute_comp = tsk_diff.attribute_comparison - self.gold_artifacts = len(tsk_diff.gold_artifacts) - self.output_artifacts = len(tsk_diff.autopsy_artifacts) - self.passed = tsk_diff.passed - - def get_artifact_comparison(self): - if not self.artifact_comp: - return "All counts matched" - else: - return "; ".join(self.artifact_comp) - - def get_attribute_comparison(self): - if not self.attribute_comp: - return "All counts matched" - list = [] - for error in self.attribute_comp: - list.append(error) - return ";".join(list) - - -#-------------------------------------------------------------# -# Parses argv and stores booleans to match command line input # -#-------------------------------------------------------------# -class Args(object): - """A container for command line options and arguments. 
-
-    Attributes:
-        single: a boolean indicating whether to run in single file mode
-        single_file: an Image to run the test on
-        rebuild: a boolean indicating whether to run in rebuild mode
-        list: a boolean indicating a config file was specified
-        unallocated: a boolean indicating unallocated space should be ignored
-        ignore: a boolean indicating the input directory should be ignored
-        keep: a boolean indicating whether to keep the SOLR index
-        verbose: a boolean indicating whether verbose output should be printed
-        exception: a boolean indicating whether errors containing
-        exception_string should be printed
-        exception_string: a String representing an exception name
-        fr: a boolean indicating whether to skip downloading fresh gold
-        standard images (force run)
-    """
-    def __init__(self):
-        self.single = False
-        self.single_file = ""
-        self.rebuild = False
-        self.list = False
-        self.config_file = ""
-        self.unallocated = False
-        self.ignore = False
-        self.keep = False
-        self.verbose = False
-        self.exception = False
-        self.exception_string = ""
-        self.fr = False
-        self.email_enabled = False
-
-    def parse(self):
-        """Get the command line arguments and parse them."""
-        nxtproc = []
-        nxtproc.append("python3")
-        nxtproc.append(sys.argv.pop(0))
-        while sys.argv:
-            arg = sys.argv.pop(0)
-            nxtproc.append(arg)
-            if(arg == "-f"):
-                #try: @@@ Commented out until a more specific except statement is added
-                arg = sys.argv.pop(0)
-                print("Running on a single file:")
-                print(path_fix(arg) + "\n")
-                self.single = True
-                self.single_file = path_fix(arg)
-                #except:
-                #    print("Error: No single file given.\n")
-                #    return False
-            elif(arg == "-r" or arg == "--rebuild"):
-                print("Running in rebuild mode.\n")
-                self.rebuild = True
-            elif(arg == "-l" or arg == "--list"):
-                try:
-                    arg = sys.argv.pop(0)
-                    nxtproc.append(arg)
-                    print("Running from configuration file:")
-                    print(arg + "\n")
-                    self.list = True
-                    self.config_file = arg
-                except:
-                    print("Error: No configuration file given.\n")
-                    return False
-            elif(arg == "-u" or arg == "--unallocated"):
-                print("Ignoring unallocated space.\n")
-                self.unallocated = True
-            elif(arg == "-k" or arg == "--keep"):
-                print("Keeping the Solr index.\n")
-                self.keep = True
-            elif(arg == "-v" or arg == "--verbose"):
-                print("Running in verbose mode:")
-                print("Printing all thrown exceptions.\n")
-                self.verbose = True
-            elif(arg == "-e" or arg == "--exception"):
-                try:
-                    arg = sys.argv.pop(0)
-                    nxtproc.append(arg)
-                    print("Running in exception mode: ")
-                    print("Printing all exceptions with the string '" + arg + "'\n")
-                    self.exception = True
-                    self.exception_string = arg
-                except:
-                    print("Error: No exception string given.")
-            elif arg == "-h" or arg == "--help":
-                print(usage())
-                return False
-            elif arg == "-fr" or arg == "--forcerun":
-                print("Not downloading new images")
-                self.fr = True
-            elif arg == "--email":
-                self.email_enabled = True
-            else:
-                print(usage())
-                return False
-        # Return whether the args were successfully parsed
-        return self._sanity_check()
-
-    def _sanity_check(self):
-        """Check to make sure there are no conflicting arguments and the
-        specified files exist.
-
-        Returns:
-            False if there are conflicting arguments or a specified file does
-            not exist, True otherwise
-        """
-        if self.single and self.list:
-            print("Cannot run both from config file and on a single file.")
-            return False
-        if self.list:
-            if not file_exists(self.config_file):
-                print("Configuration file does not exist at:",
-                      self.config_file)
-                return False
-        elif self.single:
-            if not file_exists(self.single_file):
-                print("Image file does not exist at:", self.single_file)
-                return False
-        if (not self.single) and (not self.ignore) and (not self.list):
-            self.config_file = "config.xml"
-            if not file_exists(self.config_file):
-                print("Configuration file does not exist at:", self.config_file)
-                return False
-
-        return True
-
-####
-# Helper Functions
-####
-def search_logs(string, test_data):
-    """Search through all the known log files for a given string.
-
-    Args:
-        string: the String to search for.
-        test_data: the TestData that holds the logs to search.
-
-    Returns:
-        a listof_String, the lines that contained the given String.
-    """
-    logs_path = test_data.logs_dir
-    results = []
-    for file in os.listdir(logs_path):
-        log = codecs.open(make_path(logs_path, file), "r", "utf_8")
-        for line in log:
-            if string in line:
-                results.append(line)
-        log.close()
-    return results
-
-def search_log(log, string, test_data):
-    """Search the given log for any instances of a given string.
-
-    Args:
-        log: a pathto_File, the log to search in
-        string: the String to search for.
-        test_data: the TestData that holds the log to search.
-
-    Returns:
-        a listof_String, all the lines that the string is found on
-    """
-    logs_path = make_path(test_data.logs_dir, log)
-    try:
-        results = []
-        log = codecs.open(logs_path, "r", "utf_8")
-        for line in log:
-            if string in line:
-                results.append(line)
-        log.close()
-        if results:
-            return results
-    except:
-        raise FileNotFoundException(logs_path)
-
-# Search through all the logs of the given type
-# Types include autopsy, tika, and solr
-def search_log_set(type, string, test_data):
-    """Search through all logs of the given type for the given string.
-
-    Args:
-        type: the type of log to search in.
-        string: the String to search for.
-        test_data: the TestData containing the logs to search.
-
-    Returns:
-        a listof_String, the lines on which the String was found.
-    """
-    logs_path = test_data.logs_dir
-    results = []
-    for file in os.listdir(logs_path):
-        if type in file:
-            log = codecs.open(make_path(logs_path, file), "r", "utf_8")
-            for line in log:
-                if string in line:
-                    results.append(line)
-            log.close()
-    return results
-
-
-def clear_dir(dir):
-    """Clears all files from a directory and remakes it.
-
-    Args:
-        dir: a pathto_Dir, the directory to clear
-    """
-    try:
-        if dir_exists(dir):
-            shutil.rmtree(dir)
-        os.makedirs(dir)
-        return True
-    except OSError as e:
-        Errors.print_error("Error: Cannot clear the given directory:")
-        Errors.print_error(dir + "\n")
-        print(str(e))
-        return False
-
-def del_dir(dir):
-    """Delete the given directory.
-
-    Args:
-        dir: a pathto_Dir, the directory to delete
-    """
-    try:
-        if dir_exists(dir):
-            shutil.rmtree(dir)
-        return True
-    except OSError:
-        Errors.print_error("Error: Cannot delete the given directory:")
-        Errors.print_error(dir + "\n")
-        return False
-
-def get_file_in_dir(dir, ext):
-    """Returns the first file in the given directory with the given extension.
- - Args: - dir: a pathto_Dir, the directory to search - ext: a String, the extension to search for - - Returns: - pathto_File, the file that was found - """ - try: - for file in os.listdir(dir): - if file.endswith(ext): - return make_path(dir, file) - # If nothing has been found, raise an exception - raise FileNotFoundException(dir) - except: - raise DirNotFoundException(dir) - -def find_file_in_dir(dir, name, ext): - """Find the file with the given name in the given directory. - - Args: - dir: a pathto_Dir, the directory to search - name: a String, the basename of the file to search for - ext: a String, the extension of the file to search for - """ - try: - for file in os.listdir(dir): - if file.startswith(name): - if file.endswith(ext): - return make_path(dir, file) - raise FileNotFoundException(dir) - except: - raise DirNotFoundException(dir) - - -class OS: - LINUX, MAC, WIN, CYGWIN = range(4) - - -if __name__ == "__main__": - global SYS - if _platform == "linux" or _platform == "linux2": - SYS = OS.LINUX - elif _platform == "darwin": - SYS = OS.MAC - elif _platform == "win32": - SYS = OS.WIN - elif _platform == "cygwin": - SYS = OS.CYGWIN - - if SYS is OS.WIN or SYS is OS.CYGWIN: - main() - else: - print("We only support Windows and Cygwin at this time.") +#!/usr/bin/python +# -*- coding: utf_8 -*- + + # Autopsy Forensic Browser + # + # Copyright 2013 Basis Technology Corp. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. +from tskdbdiff import TskDbDiff, TskDbDiffException +import codecs +import datetime +import logging +import os +import re +import shutil +import socket +import sqlite3 +import subprocess +import sys +from sys import platform as _platform +import time +import traceback +import xml +from time import localtime, strftime +from xml.dom.minidom import parse, parseString +import smtplib +from email.mime.image import MIMEImage +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +import re +import zipfile +import zlib +import Emailer +import srcupdater +from regression_utils import * + +# +# Please read me... +# +# This is the regression testing Python script. +# It uses an ant command to run build.xml for RegressionTest.java +# +# The code is cleanly sectioned and commented. +# Please follow the current formatting. +# It is a long and potentially confusing script. +# +# Variable, function, and class names are written in Python conventions: +# this_is_a_variable this_is_a_function() ThisIsAClass +# +# + + +# Data Definitions: +# +# pathto_X: A path to type X. 
+# ConfigFile: An XML file formatted according to the template in myconfig.xml
+# ParsedConfig: A dom object that represents a ConfigFile
+# SQLCursor: A cursor received from a connection to an SQL database
+# Nat: A Natural Number
+# Image: An image
+#
+
+# Enumeration of database types used for the simplification of generating database paths
+DBType = enum('OUTPUT', 'GOLD', 'BACKUP')
+
+# Common filename of the output and gold databases (although they are in different directories)
+DB_FILENAME = "autopsy.db"
+
+# Backup database filename
+BACKUP_DB_FILENAME = "autopsy_backup.db"
+
+# TODO: Double check this purpose statement
+# Folder name for gold standard database testing
+AUTOPSY_TEST_CASE = "AutopsyTestCase"
+
+# TODO: Double check this purpose statement
+# The filename of the log to store error messages
+COMMON_LOG = "AutopsyErrors.txt"
+
+Day = 0
+
+#----------------------#
+#         Main         #
+#----------------------#
+def main():
+    """Parse the command-line arguments, create the configuration, and run the tests."""
+    args = Args()
+    parse_result = args.parse()
+    # The arguments were given wrong; bail out before building a configuration from them:
+    if not parse_result:
+        return
+    test_config = TestConfiguration(args)
+    if(not args.fr):
+        antin = ["ant"]
+        antin.append("-f")
+        antin.append(os.path.join("..", "..", "build.xml"))
+        antin.append("test-download-imgs")
+        if SYS is OS.CYGWIN:
+            subprocess.call(antin)
+        elif SYS is OS.WIN:
+            theproc = subprocess.Popen(antin, shell = True, stdout=subprocess.PIPE)
+            theproc.communicate()
+    # Otherwise test away!
+    TestRunner.run_tests(test_config)
+
+
+class TestRunner(object):
+    """A collection of functions to run the regression tests."""
+
+    def run_tests(test_config):
+        """Run the tests specified by the main TestConfiguration.
+
+        Executes the AutopsyIngest for each image and dispatches the results based on
+        the mode (rebuild or testing).
+        """
+        test_data_list = [TestData(image, test_config) for image in test_config.images]
+
+        Reports.html_add_images(test_config.html_log, test_config.images)
+
+        logres = []
+        for test_data in test_data_list:
+            Errors.clear_print_logs()
+            Errors.set_testing_phase(test_data.image)
+            if not (test_config.args.rebuild or os.path.exists(test_data.gold_archive)):
+                msg = "Gold standard doesn't exist, skipping image:"
+                Errors.print_error(msg)
+                Errors.print_error(test_data.gold_archive)
+                continue
+            TestRunner._run_autopsy_ingest(test_data)
+
+            if test_config.args.rebuild:
+                TestRunner.rebuild(test_data)
+            else:
+                logres.append(TestRunner._run_test(test_data))
+            test_data.printout = Errors.printout
+            test_data.printerror = Errors.printerror
+
+        Reports.write_html_foot(test_config.html_log)
+        # TODO: move this elsewhere
+        if (len(logres) > 0):
+            for lm in logres:
+                for ln in lm:
+                    Errors.add_email_msg(ln)
+
+        # TODO: possibly worth putting this in a sub method
+        if all([ test_data.overall_passed for test_data in test_data_list ]):
+            Errors.add_email_msg("All images passed.\n")
+        else:
+            msg = "The following images failed:\n"
+            for test_data in test_data_list:
+                if not test_data.overall_passed:
+                    msg += "\t" + test_data.image + "\n"
+            Errors.add_email_msg(msg)
+            html = open(test_config.html_log)
+            Errors.add_email_attachment(html.name)
+            html.close()
+
+        if test_config.email_enabled:
+            Emailer.send_email(test_config.mail_to, test_config.mail_server,
+                               test_config.mail_subject, Errors.email_body, Errors.email_attachs)
+
+    def _run_autopsy_ingest(test_data):
+        """Run Autopsy ingest for the image in the given TestData.
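+
+        (For orientation, a sketch of the artifacts this step leaves in the
+        output directory, derived from the calls below: the case database is
+        dumped to <image_name>DBDump.txt and <image_name>SortedData.txt by
+        TskDbDiff.dump_output_db, and the Autopsy logs are gathered by
+        copy_logs and Logs.generate_log_data.)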
+
+        Also generates the necessary logs for rebuilding or diff.
+
+        Args:
+            test_data: the TestData to run the ingest on.
+        """
+        if image_type(test_data.image_file) == IMGTYPE.UNKNOWN:
+            Errors.print_error("Error: Image type is unrecognized:")
+            Errors.print_error(test_data.image_file + "\n")
+            return
+
+        logging.debug("--------------------")
+        logging.debug(test_data.image_name)
+        logging.debug("--------------------")
+        TestRunner._run_ant(test_data)
+        time.sleep(2) # Give everything a moment to process
+
+        try:
+            # Dump the database before we diff or use it for rebuild
+            TskDbDiff.dump_output_db(test_data.get_db_path(DBType.OUTPUT), test_data.get_db_dump_path(DBType.OUTPUT),
+                                     test_data.get_sorted_data_path(DBType.OUTPUT))
+        except sqlite3.OperationalError as e:
+            print("Ingest did not run properly.",
+                  "Make sure no other instances of Autopsy are open and try again.")
+            sys.exit()
+
+        # Merge the logs into a single log for later diff / rebuild
+        copy_logs(test_data)
+        Logs.generate_log_data(test_data)
+
+        TestRunner._handle_solr(test_data)
+        TestRunner._handle_exception(test_data)
+
+    def _run_test(test_data):
+        """Compare the results of the output to the gold standard.
+
+        Args:
+            test_data: the TestData
+
+        Returns:
+            logres, a listof_String: the TskCoreException lines found in the
+            common log.
+        """
+        TestRunner._extract_gold(test_data)
+
+        # Look for core exceptions
+        # @@@ Should be moved to TestResultsDiffer, but it didn't know about logres -- need to look into that
+        logres = Logs.search_common_log("TskCoreException", test_data)
+
+        TestResultsDiffer.run_diff(test_data)
+        print("Html report passed: ", test_data.html_report_passed)
+        print("Errors diff passed: ", test_data.errors_diff_passed)
+        print("DB diff passed: ", test_data.db_diff_passed)
+        test_data.overall_passed = (test_data.html_report_passed and
+                                    test_data.errors_diff_passed and test_data.db_diff_passed)
+
+        Reports.generate_reports(test_data)
+        if(not test_data.overall_passed):
+            Errors.add_email_attachment(test_data.common_log_path)
+        return logres
+
+    def _extract_gold(test_data):
+        """Extract gold archive file to output/gold/tmp/
+
+        Args:
+            test_data: the TestData
+        """
+        extrctr = zipfile.ZipFile(test_data.gold_archive, 'r', compression=zipfile.ZIP_DEFLATED)
+        extrctr.extractall(test_data.main_config.gold)
+        extrctr.close()
+        time.sleep(2)
+
+    def _handle_solr(test_data):
+        """Clean up the SOLR index unless running in keep mode (-k).
+
+        Args:
+            test_data: the TestData
+        """
+        if not test_data.main_config.args.keep:
+            if clear_dir(test_data.solr_index):
+                print_report([], "DELETE SOLR INDEX", "Solr index deleted.")
+        else:
+            print_report([], "KEEP SOLR INDEX", "Solr index has been kept.")
+
+    def _handle_exception(test_data):
+        """If running in exception mode, print exceptions to log.
+
+        Args:
+            test_data: the TestData
+        """
+        if test_data.main_config.args.exception:
+            exceptions = search_logs(test_data.main_config.args.exception_string, test_data)
+            okay = ("No warnings or exceptions found containing text '" +
+                    test_data.main_config.args.exception_string + "'.")
+            print_report(exceptions, "EXCEPTION", okay)
+
+    def rebuild(test_data):
+        """Rebuild the gold standard with the given TestData.
+
+        Copies the test-generated database and html report files into the gold directory.
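+
+        The rebuilt standards are then zipped into <gold>/<image_name>-archive.zip,
+        which is the name TestData.gold_archive expects on the next test run.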
+
+        """
+        test_config = test_data.main_config
+        # Errors to print
+        errors = []
+        # Delete the current gold standards
+        gold_dir = test_config.img_gold
+        clear_dir(test_config.img_gold)
+        tmpdir = make_path(gold_dir, test_data.image_name)
+        dbinpth = test_data.get_db_path(DBType.OUTPUT)
+        dboutpth = make_path(tmpdir, DB_FILENAME)
+        dataoutpth = make_path(tmpdir, test_data.image_name + "SortedData.txt")
+        dbdumpinpth = test_data.get_db_dump_path(DBType.OUTPUT)
+        dbdumpoutpth = make_path(tmpdir, test_data.image_name + "DBDump.txt")
+        if not os.path.exists(test_config.img_gold):
+            os.makedirs(test_config.img_gold)
+        if not os.path.exists(tmpdir):
+            os.makedirs(tmpdir)
+        try:
+            shutil.copy(dbinpth, dboutpth)
+            if file_exists(test_data.get_sorted_data_path(DBType.OUTPUT)):
+                shutil.copy(test_data.get_sorted_data_path(DBType.OUTPUT), dataoutpth)
+            shutil.copy(dbdumpinpth, dbdumpoutpth)
+            error_pth = make_path(tmpdir, test_data.image_name + "SortedErrors.txt")
+            shutil.copy(test_data.sorted_log, error_pth)
+        except IOError as e:
+            Errors.print_error(str(e))
+            Errors.add_email_msg("Not rebuilt properly")
+            print(str(e))
+            print(traceback.format_exc())
+        # Rebuild the HTML report
+        output_html_report_dir = test_data.get_html_report_path(DBType.OUTPUT)
+        gold_html_report_dir = make_path(tmpdir, "Report")
+
+        try:
+            shutil.copytree(output_html_report_dir, gold_html_report_dir)
+        except OSError as e:
+            errors.append(str(e))
+        except Exception as e:
+            errors.append("Error: Unknown fatal error when rebuilding the gold html report.")
+            errors.append(str(e) + "\n")
+            print(traceback.format_exc())
+        oldcwd = os.getcwd()
+        zpdir = gold_dir
+        os.chdir(zpdir)
+        os.chdir("..")
+        img_gold = "tmp"
+        img_archive = make_path(test_data.image_name + "-archive.zip")
+        comprssr = zipfile.ZipFile(img_archive, 'w', compression=zipfile.ZIP_DEFLATED)
+        TestRunner.zipdir(img_gold, comprssr)
+        comprssr.close()
+        os.chdir(oldcwd)
+        del_dir(test_config.img_gold)
+        okay = "Successfully rebuilt all gold standards."
+        print_report(errors, "REBUILDING", okay)
+
+    def zipdir(path, zip):
+        """Write every file under the given path into the given ZipFile."""
+        for root, dirs, files in os.walk(path):
+            for file in files:
+                zip.write(os.path.join(root, file))
+
+    def _run_ant(test_data):
+        """Construct and run the ant build command for the given TestData.
+
+        Tests Autopsy by calling RegressionTest.java via the ant build file.
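+
+        The assembled command looks roughly like this (illustrative values only;
+        the flags are the ones appended below):
+
+            ant -v -f ../../Testing/build.xml regression-test -l <antlog_dir>
+                -Dimg_path=<image> -Dknown_bad_path=<idx> -Dkeyword_path=<xml>
+                -Dnsrl_path=<idx> -Dgold_path=<gold> -Dout_path=<output>
+                -Dignore_unalloc=<bool> -Dtest.timeout=<ms>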
+ + Args: + test_data: the TestData + """ + test_config = test_data.main_config + # Set up the directories + if dir_exists(test_data.output_path): + shutil.rmtree(test_data.output_path) + os.makedirs(test_data.output_path) + test_data.ant = ["ant"] + test_data.ant.append("-v") + test_data.ant.append("-f") + # case.ant.append(case.build_path) + test_data.ant.append(os.path.join("..","..","Testing","build.xml")) + test_data.ant.append("regression-test") + test_data.ant.append("-l") + test_data.ant.append(test_data.antlog_dir) + test_data.ant.append("-Dimg_path=" + test_data.image_file) + test_data.ant.append("-Dknown_bad_path=" + test_config.known_bad_path) + test_data.ant.append("-Dkeyword_path=" + test_config.keyword_path) + test_data.ant.append("-Dnsrl_path=" + test_config.nsrl_path) + test_data.ant.append("-Dgold_path=" + test_config.gold) + test_data.ant.append("-Dout_path=" + + make_local_path(test_data.output_path)) + test_data.ant.append("-Dignore_unalloc=" + "%s" % test_config.args.unallocated) + test_data.ant.append("-Dtest.timeout=" + str(test_config.timeout)) + + Errors.print_out("Ingesting Image:\n" + test_data.image_file + "\n") + Errors.print_out("CMD: " + " ".join(test_data.ant)) + Errors.print_out("Starting test...\n") + antoutpth = make_local_path(test_data.main_config.output_dir, "antRunOutput.txt") + antout = open(antoutpth, "a") + if SYS is OS.CYGWIN: + subprocess.call(test_data.ant, stdout=subprocess.PIPE) + elif SYS is OS.WIN: + theproc = subprocess.Popen(test_data.ant, shell = True, stdout=subprocess.PIPE) + theproc.communicate() + antout.close() + + +class TestData(object): + """Container for the input and output of a single image. + + Represents data for the test of a single image, including path to the image, + database paths, etc. + + Attributes: + main_config: the global TestConfiguration + ant: a listof_String, the ant command for this TestData + image_file: a pathto_Image, the image for this TestData + image: a String, the image file's name + image_name: a String, the image file's name with a trailing (0) + output_path: pathto_Dir, the output directory for this TestData + autopsy_data_file: a pathto_File, the IMAGE_NAMEAutopsy_data.txt file + warning_log: a pathto_File, the AutopsyLogs.txt file + antlog_dir: a pathto_File, the antlog.txt file + test_dbdump: a pathto_File, the database dump, IMAGENAMEDump.txt + common_log_path: a pathto_File, the IMAGE_NAMECOMMON_LOG file + sorted_log: a pathto_File, the IMAGENAMESortedErrors.txt file + reports_dir: a pathto_Dir, the AutopsyTestCase/Reports folder + gold_data_dir: a pathto_Dir, the gold standard directory + gold_archive: a pathto_File, the gold standard archive + logs_dir: a pathto_Dir, the location where autopsy logs are stored + solr_index: a pathto_Dir, the locatino of the solr index + html_report_passed: a boolean, did the HTML report diff pass? + errors_diff_passed: a boolean, did the error diff pass? + db_diff_passed: a boolean, did the db diff pass? + overall_passed: a boolean, did the test pass? 
+ total_test_time: a String representation of the test duration + start_date: a String representation of this TestData's start date + end_date: a String representation of the TestData's end date + total_ingest_time: a String representation of the total ingest time + artifact_count: a Nat, the number of artifacts + artifact_fail: a Nat, the number of artifact failures + heap_space: a String representation of TODO + service_times: a String representation of TODO + autopsy_version: a String, the version of autopsy that was run + ingest_messages: a Nat, the number of ingest messages + indexed_files: a Nat, the number of files indexed during the ingest + indexed_chunks: a Nat, the number of chunks indexed during the ingest + printerror: a listof_String, the error messages printed during this TestData's test + printout: a listof_String, the messages pritned during this TestData's test + """ + + def __init__(self, image, main_config): + """Init this TestData with it's image and the test configuration. + + Args: + image: the Image to be tested. + main_config: the global TestConfiguration. + """ + # Configuration Data + self.main_config = main_config + self.ant = [] + self.image_file = str(image) + # TODO: This 0 should be be refactored out, but it will require rebuilding and changing of outputs. + self.image = get_image_name(self.image_file) + self.image_name = self.image + "(0)" + # Directory structure and files + self.output_path = make_path(self.main_config.output_dir, self.image_name) + self.autopsy_data_file = make_path(self.output_path, self.image_name + "Autopsy_data.txt") + self.warning_log = make_local_path(self.output_path, "AutopsyLogs.txt") + self.antlog_dir = make_local_path(self.output_path, "antlog.txt") + self.test_dbdump = make_path(self.output_path, self.image_name + + "DBDump.txt") + self.common_log_path = make_local_path(self.output_path, self.image_name + COMMON_LOG) + self.sorted_log = make_local_path(self.output_path, self.image_name + "SortedErrors.txt") + self.reports_dir = make_path(self.output_path, AUTOPSY_TEST_CASE, "Reports") + self.gold_data_dir = make_path(self.main_config.img_gold, self.image_name) + self.gold_archive = make_path(self.main_config.gold, + self.image_name + "-archive.zip") + self.logs_dir = make_path(self.output_path, "logs") + self.solr_index = make_path(self.output_path, AUTOPSY_TEST_CASE, + "ModuleOutput", "KeywordSearch") + # Results and Info + self.html_report_passed = False + self.errors_diff_passed = False + self.db_diff_passed = False + self.overall_passed = False + # Ingest info + self.total_test_time = "" + self.start_date = "" + self.end_date = "" + self.total_ingest_time = "" + self.artifact_count = 0 + self.artifact_fail = 0 + self.heap_space = "" + self.service_times = "" + self.autopsy_version = "" + self.ingest_messages = 0 + self.indexed_files = 0 + self.indexed_chunks = 0 + # Error tracking + self.printerror = [] + self.printout = [] + + def ant_to_string(self): + string = "" + for arg in self.ant: + string += (arg + " ") + return string + + def get_db_path(self, db_type): + """Get the path to the database file that corresponds to the given DBType. + + Args: + DBType: the DBType of the path to be generated. 
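+
+        Example (hypothetical directories, for an image named "img(0)"):
+            DBType.GOLD   -> <img_gold>/img(0)/autopsy.db
+            DBType.OUTPUT -> <output_dir>/img(0)/AutopsyTestCase/autopsy.db
+            DBType.BACKUP -> <output_dir>/img(0)/AutopsyTestCase/autopsy_backup.db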
+ """ + if(db_type == DBType.GOLD): + db_path = make_path(self.gold_data_dir, DB_FILENAME) + elif(db_type == DBType.OUTPUT): + db_path = make_path(self.main_config.output_dir, self.image_name, AUTOPSY_TEST_CASE, DB_FILENAME) + else: + db_path = make_path(self.main_config.output_dir, self.image_name, AUTOPSY_TEST_CASE, BACKUP_DB_FILENAME) + return db_path + + def get_html_report_path(self, html_type): + """Get the path to the HTML Report folder that corresponds to the given DBType. + + Args: + DBType: the DBType of the path to be generated. + """ + if(html_type == DBType.GOLD): + return make_path(self.gold_data_dir, "Report") + else: + # Autopsy creates an HTML report folder in the form AutopsyTestCase DATE-TIME + # It's impossible to get the exact time the folder was created, but the folder + # we are looking for is the only one in the self.reports_dir folder + html_path = "" + for fs in os.listdir(self.reports_dir): + html_path = make_path(self.reports_dir, fs) + if os.path.isdir(html_path): + break + return make_path(html_path, os.listdir(html_path)[0]) + + def get_sorted_data_path(self, file_type): + """Get the path to the SortedData file that corresponds to the given DBType. + + Args: + file_type: the DBType of the path to be generated + """ + return self._get_path_to_file(file_type, "SortedData.txt") + + def get_sorted_errors_path(self, file_type): + """Get the path to the SortedErrors file that correspodns to the given + DBType. + + Args: + file_type: the DBType of the path to be generated + """ + return self._get_path_to_file(file_type, "SortedErrors.txt") + + def get_db_dump_path(self, file_type): + """Get the path to the DBDump file that corresponds to the given DBType. + + Args: + file_type: the DBType of the path to be generated + """ + return self._get_path_to_file(file_type, "DBDump.txt") + + def _get_path_to_file(self, file_type, file_name): + """Get the path to the specified file with the specified type. + + Args: + file_type: the DBType of the path to be generated + file_name: a String, the filename of the path to be generated + """ + full_filename = self.image_name + file_name + if(file_type == DBType.GOLD): + return make_path(self.gold_data_dir, full_filename) + else: + return make_path(self.output_path, full_filename) + + +class TestConfiguration(object): + """Container for test configuration data. + + The Master Test Configuration. Encapsulates consolidated high level input from + config XML file and command-line arguments. + + Attributes: + args: an Args, the command line arguments + output_dir: a pathto_Dir, the output directory + input_dir: a pathto_Dir, the input directory + gold: a pathto_Dir, the gold directory + img_gold: a pathto_Dir, the temp directory where gold images are unzipped to + csv: a pathto_File, the local csv file + global_csv: a pathto_File, the global csv file + html_log: a pathto_File + known_bad_path: + keyword_path: + nsrl_path: + build_path: a pathto_File, the ant build file which runs the tests + autopsy_version: + ingest_messages: a Nat, number of ingest messages + indexed_files: a Nat, the number of indexed files + indexed_chunks: a Nat, the number of indexed chunks + timer: + images: a listof_Image, the images to be tested + timeout: a Nat, the amount of time before killing the test + ant: a listof_String, the ant command to run the tests + """ + + def __init__(self, args): + """Inits TestConfiguration and loads a config file if available. + + Args: + args: an Args, the command line arguments. 
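+
+        A minimal config file might look like the following sketch (the tag
+        names are the ones _load_config_file and _init_imgs consume; the root
+        element name is illustrative):
+
+            <Properties>
+                <indir value="path/to/input"/>
+                <golddir value="path/to/gold"/>
+                <image value="path/to/image1.img"/>
+                <image value="path/to/image2.img"/>
+            </Properties>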
+ """ + self.args = args + # Paths: + self.output_dir = "" + self.input_dir = make_local_path("..","input") + self.gold = make_path("..", "output", "gold") + self.img_gold = make_path(self.gold, 'tmp') + # Logs: + self.csv = "" + self.global_csv = "" + self.html_log = "" + # Ant info: + self.known_bad_path = make_path(self.input_dir, "notablehashes.txt-md5.idx") + self.keyword_path = make_path(self.input_dir, "notablekeywords.xml") + self.nsrl_path = make_path(self.input_dir, "nsrl.txt-md5.idx") + self.build_path = make_path("..", "build.xml") + # Infinite Testing info + timer = 0 + self.images = [] + # Email info + self.email_enabled = args.email_enabled + self.mail_server = "" + self.mail_to = "" + self.mail_subject = "" + # Set the timeout to something huge + # The entire tester should not timeout before this number in ms + # However it only seems to take about half this time + # And it's very buggy, so we're being careful + self.timeout = 24 * 60 * 60 * 1000 * 1000 + + if not self.args.single: + self._load_config_file(self.args.config_file) + else: + self.images.append(self.args.single_file) + self._init_logs() + #self._init_imgs() + #self._init_build_info() + + + def _load_config_file(self, config_file): + """Updates this TestConfiguration's attributes from the config file. + + Initializes this TestConfiguration by iterating through the XML config file + command-line argument. Populates self.images and optional email configuration + + Args: + config_file: ConfigFile - the configuration file to load + """ + try: + count = 0 + parsed_config = parse(config_file) + logres = [] + counts = {} + if parsed_config.getElementsByTagName("indir"): + self.input_dir = parsed_config.getElementsByTagName("indir")[0].getAttribute("value").encode().decode("utf_8") + if parsed_config.getElementsByTagName("global_csv"): + self.global_csv = parsed_config.getElementsByTagName("global_csv")[0].getAttribute("value").encode().decode("utf_8") + self.global_csv = make_local_path(self.global_csv) + if parsed_config.getElementsByTagName("golddir"): + self.gold = parsed_config.getElementsByTagName("golddir")[0].getAttribute("value").encode().decode("utf_8") + self.img_gold = make_path(self.gold, 'tmp') + + self._init_imgs(parsed_config) + self._init_build_info(parsed_config) + self._init_email_info(parsed_config) + + except IOError as e: + msg = "There was an error loading the configuration file.\n" + msg += "\t" + str(e) + Errors.add_email_msg(msg) + logging.critical(traceback.format_exc()) + print(traceback.format_exc()) + + def _init_logs(self): + """Setup output folder, logs, and reporting infrastructure.""" + if(not dir_exists(make_path("..", "output", "results"))): + os.makedirs(make_path("..", "output", "results",)) + self.output_dir = make_path("..", "output", "results", time.strftime("%Y.%m.%d-%H.%M.%S")) + os.makedirs(self.output_dir) + self.csv = make_local_path(self.output_dir, "CSV.txt") + self.html_log = make_path(self.output_dir, "AutopsyTestCase.html") + log_name = self.output_dir + "\\regression.log" + logging.basicConfig(filename=log_name, level=logging.DEBUG) + + def _init_build_info(self, parsed_config): + """Initializes paths that point to information necessary to run the AutopsyIngest.""" + build_elements = parsed_config.getElementsByTagName("build") + if build_elements: + build_element = build_elements[0] + build_path = build_element.getAttribute("value").encode().decode("utf_8") + self.build_path = build_path + + def _init_imgs(self, parsed_config): + """Initialize the list of images to run 
tests on.""" + for element in parsed_config.getElementsByTagName("image"): + value = element.getAttribute("value").encode().decode("utf_8") + print ("Image in Config File: " + value) + if file_exists(value): + self.images.append(value) + else: + msg = "File: " + value + " doesn't exist" + Errors.print_error(msg) + Errors.add_email_msg(msg) + image_count = len(self.images) + + # Sanity check to see if there are obvious gold images that we are not testing + gold_count = 0 + for file in os.listdir(self.gold): + if not(file == 'tmp'): + gold_count+=1 + + if (image_count > gold_count): + print("******Alert: There are more input images than gold standards, some images will not be properly tested.\n") + elif (image_count < gold_count): + print("******Alert: There are more gold standards than input images, this will not check all gold Standards.\n") + + def _init_email_info(self, parsed_config): + """Initializes email information dictionary""" + email_elements = parsed_config.getElementsByTagName("email") + if email_elements: + mail_to = email_elements[0] + self.mail_to = mail_to.getAttribute("value").encode().decode("utf_8") + mail_server_elements = parsed_config.getElementsByTagName("mail_server") + if mail_server_elements: + mail_from = mail_server_elements[0] + self.mail_server = mail_from.getAttribute("value").encode().decode("utf_8") + subject_elements = parsed_config.getElementsByTagName("subject") + if subject_elements: + subject = subject_elements[0] + self.mail_subject = subject.getAttribute("value").encode().decode("utf_8") + if self.mail_server and self.mail_to and self.args.email_enabled: + self.email_enabled = True + print("Email will be sent to ", self.mail_to) + else: + print("No email will be sent.") + + +#-------------------------------------------------# +# Functions relating to comparing outputs # +#-------------------------------------------------# +class TestResultsDiffer(object): + """Compares results for a single test.""" + + def run_diff(test_data): + """Compares results for a single test. + + Args: + test_data: the TestData to use. 
+            (A TskDbDiff is created from test_data to drive the database
+            comparison.)
+        """
+        try:
+            output_db = test_data.get_db_path(DBType.OUTPUT)
+            gold_db = test_data.get_db_path(DBType.GOLD)
+            output_dir = test_data.output_path
+            gold_bb_dump = test_data.get_sorted_data_path(DBType.GOLD)
+            gold_dump = test_data.get_db_dump_path(DBType.GOLD)
+            test_data.db_diff_passed = all(TskDbDiff(output_db, gold_db, output_dir=output_dir, gold_bb_dump=gold_bb_dump,
+                                                     gold_dump=gold_dump).run_diff())
+
+            # Compare Exceptions
+            # replace is a function that replaces strings of digits with 'd'
+            # this is needed so dates and times will not cause the diff to fail
+            replace = lambda file: re.sub(re.compile(r"\d"), "d", file)
+            output_errors = test_data.get_sorted_errors_path(DBType.OUTPUT)
+            gold_errors = test_data.get_sorted_errors_path(DBType.GOLD)
+            passed = TestResultsDiffer._compare_text(output_errors, gold_errors,
+                                                     replace)
+            test_data.errors_diff_passed = passed
+
+            # Compare html output
+            gold_report_path = test_data.get_html_report_path(DBType.GOLD)
+            output_report_path = test_data.get_html_report_path(DBType.OUTPUT)
+            passed = TestResultsDiffer._html_report_diff(gold_report_path,
+                                                         output_report_path)
+            test_data.html_report_passed = passed
+
+            # Clean up tmp folder
+            del_dir(test_data.gold_data_dir)
+
+        except sqlite3.OperationalError as e:
+            Errors.print_error("Tests failed while running the diff:\n")
+            Errors.print_error(str(e))
+        except TskDbDiffException as e:
+            Errors.print_error(str(e))
+        except Exception as e:
+            Errors.print_error("Tests failed due to an error, try rebuilding or creating gold standards.\n")
+            Errors.print_error(str(e) + "\n")
+            print(traceback.format_exc())
+
+    def _compare_text(output_file, gold_file, process=None):
+        """Compare two text files.
+
+        Args:
+            output_file: a pathto_File, the output text file
+            gold_file: a pathto_File, the gold text file
+            process: (optional) a function of String -> String that will be
+            called on each input file before the diff, if specified.
+        """
+        if(not file_exists(output_file)):
+            return False
+        output_data = codecs.open(output_file, "r", "utf_8").read()
+        gold_data = codecs.open(gold_file, "r", "utf_8").read()
+
+        if process is not None:
+            output_data = process(output_data)
+            gold_data = process(gold_data)
+
+        if (not(gold_data == output_data)):
+            diff_path = os.path.splitext(os.path.basename(output_file))[0]
+            diff_path += "-Diff.txt"
+            diff_file = codecs.open(diff_path, "wb", "utf_8")
+            dffcmdlst = ["diff", output_file, gold_file]
+            subprocess.call(dffcmdlst, stdout = diff_file)
+            Errors.add_email_attachment(diff_path)
+            msg = "There was a difference in "
+            msg += os.path.basename(output_file) + ".\n"
+            Errors.add_email_msg(msg)
+            Errors.print_error(msg)
+            return False
+        else:
+            return True
+
+    def _html_report_diff(gold_report_path, output_report_path):
+        """Compare the output and gold html reports.
+
+        Args:
+            gold_report_path: a pathto_Dir, the gold HTML report directory
+            output_report_path: a pathto_Dir, the output HTML report directory
+
+        Returns:
+            true, if the reports match, false otherwise.
+        """
+        try:
+            gold_html_files = get_files_by_ext(gold_report_path, ".html")
+            output_html_files = get_files_by_ext(output_report_path, ".html")
+
+            # Ensure both reports have the same number of files and are in the same order
+            if(len(gold_html_files) != len(output_html_files)):
+                msg = "The reports did not have the same number of files."
+                msg += " One of the reports may have been corrupted."
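+                # Note: on a count mismatch we only report the problem; the
+                # per-file comparison below is skipped and the function falls
+                # through, which the caller effectively treats as a failed diff.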
+                Errors.print_error(msg)
+            else:
+                gold_html_files.sort()
+                output_html_files.sort()
+
+                total = {"Gold": 0, "New": 0}
+                for gold, output in zip(gold_html_files, output_html_files):
+                    count = TestResultsDiffer._compare_report_files(gold, output)
+                    total["Gold"] += count[0]
+                    total["New"] += count[1]
+
+                okay = "The test report matches the gold report."
+                errors = ["Gold report had " + str(total["Gold"]) + " errors", "New report had " + str(total["New"]) + " errors."]
+                print_report(errors, "REPORT COMPARISON", okay)
+
+                if total["Gold"] == total["New"]:
+                    return True
+                else:
+                    Errors.print_error("The reports did not match each other.\n " + errors[0] + " and the " + errors[1])
+                    return False
+        except OSError as e:
+            Errors.print_error(str(e))
+            return False
+        except Exception as e:
+            Errors.print_error("Error: Unknown fatal error comparing reports.")
+            Errors.print_error(str(e) + "\n")
+            logging.critical(traceback.format_exc())
+            return False
+
+    def _compare_report_files(a_path, b_path):
+        """Compares the two specified report html files.
+
+        Args:
+            a_path: a pathto_File, the first html report file
+            b_path: a pathto_File, the second html report file
+
+        Returns:
+            a tuple of (Nat, Nat), which represent the length of each
+            unordered list in the html report files, or (0, 0) if the
+            lengths are the same.
+        """
+        a_file = open(a_path)
+        b_file = open(b_path)
+        a = a_file.read()
+        b = b_file.read()
+        a = a[a.find("<ul>"):]
+        b = b[b.find("<ul>"):]
+
+        a_list = TestResultsDiffer._split(a, 50)
+        b_list = TestResultsDiffer._split(b, 50)
+        if not len(a_list) == len(b_list):
+            ex = (len(a_list), len(b_list))
+            return ex
+        else:
+            return (0, 0)
+
+    # Split a string into an array of strings of the given size
+    def _split(input, size):
+        return [input[start:start+size] for start in range(0, len(input), size)]
+
+
+class Reports(object):
+    def generate_reports(test_data):
+        """Generate the reports for a single test.
+
+        Args:
+            test_data: the TestData
+        """
+        Reports._generate_html(test_data)
+        if test_data.main_config.global_csv:
+            Reports._generate_csv(test_data.main_config.global_csv, test_data)
+        else:
+            Reports._generate_csv(test_data.main_config.csv, test_data)
+
+    def _generate_html(test_data):
+        """Generate the HTML log file."""
+        # If the file doesn't exist yet, this is the first test_config to run for
+        # this test, so we need to make the start of the html log
+        html_log = test_data.main_config.html_log
+        if not file_exists(html_log):
+            Reports.write_html_head(html_log)
+        with open(html_log, "a") as html:
+            # The image title
+            title = "<h1><a name='" + test_data.image_name + "'>" + test_data.image_name + " \
+            <span>tested on <strong>" + socket.gethostname() + "</strong></span></a></h1>\
+            <h2 align='center'>\
+            <a href='#" + test_data.image_name + "-errors'>Errors and Warnings</a> |\
+            <a href='#" + test_data.image_name + "-info'>Information</a> |\
+            <a href='#" + test_data.image_name + "-general'>General Output</a> |\
+            <a href='#" + test_data.image_name + "-logs'>Logs</a>\
+            </h2>"
+            # The script errors found
+            if not test_data.overall_passed:
+                ids = 'errors1'
+            else:
+                ids = 'errors'
+            errors = "<div id='" + ids + "'>\
+            <h2><a name='" + test_data.image_name + "-errors'>Errors and Warnings</a></h2>\
+            <hr color='#FF0000'>"
+            # For each error we have logged in the test_config
+            for error in test_data.printerror:
+                # Escape < and > to avoid any html display errors
+                errors += "<p>" + error.replace("<", "&lt;").replace(">", "&gt;") + "</p>"
+                # If there is a \n, we probably want a <br /> in the html
+                if "\n" in 
error: + errors += "<br />" + errors += "</div>" + + # Links to the logs + logs = "<div id='logs'>\ + <h2><a name='" + test_data.image_name + "-logs'>Logs</a></h2>\ + <hr color='#282828'>" + logs_path = test_data.logs_dir + for file in os.listdir(logs_path): + logs += "<p><a href='file:\\" + make_path(logs_path, file) + "' target='_blank'>" + file + "</a></p>" + logs += "</div>" + + # All the testing information + info = "<div id='info'>\ + <h2><a name='" + test_data.image_name + "-info'>Information</a></h2>\ + <hr color='#282828'>\ + <table cellspacing='5px'>" + # The individual elements + info += "<tr><td>Image Path:</td>" + info += "<td>" + test_data.image_file + "</td></tr>" + info += "<tr><td>Image Name:</td>" + info += "<td>" + test_data.image_name + "</td></tr>" + info += "<tr><td>test_config Output Directory:</td>" + info += "<td>" + test_data.main_config.output_dir + "</td></tr>" + info += "<tr><td>Autopsy Version:</td>" + info += "<td>" + test_data.autopsy_version + "</td></tr>" + info += "<tr><td>Heap Space:</td>" + info += "<td>" + test_data.heap_space + "</td></tr>" + info += "<tr><td>Test Start Date:</td>" + info += "<td>" + test_data.start_date + "</td></tr>" + info += "<tr><td>Test End Date:</td>" + info += "<td>" + test_data.end_date + "</td></tr>" + info += "<tr><td>Total Test Time:</td>" + info += "<td>" + test_data.total_test_time + "</td></tr>" + info += "<tr><td>Total Ingest Time:</td>" + info += "<td>" + test_data.total_ingest_time + "</td></tr>" + info += "<tr><td>Exceptions Count:</td>" + info += "<td>" + str(len(get_exceptions(test_data))) + "</td></tr>" + info += "<tr><td>Autopsy OutOfMemoryExceptions:</td>" + info += "<td>" + str(len(search_logs("OutOfMemoryException", test_data))) + "</td></tr>" + info += "<tr><td>Autopsy OutOfMemoryErrors:</td>" + info += "<td>" + str(len(search_logs("OutOfMemoryError", test_data))) + "</td></tr>" + info += "<tr><td>Tika OutOfMemoryErrors/Exceptions:</td>" + info += "<td>" + str(Reports._get_num_memory_errors("tika", test_data)) + "</td></tr>" + info += "<tr><td>Solr OutOfMemoryErrors/Exceptions:</td>" + info += "<td>" + str(Reports._get_num_memory_errors("solr", test_data)) + "</td></tr>" + info += "<tr><td>TskCoreExceptions:</td>" + info += "<td>" + str(len(search_log_set("autopsy", "TskCoreException", test_data))) + "</td></tr>" + info += "<tr><td>TskDataExceptions:</td>" + info += "<td>" + str(len(search_log_set("autopsy", "TskDataException", test_data))) + "</td></tr>" + info += "<tr><td>Ingest Messages Count:</td>" + info += "<td>" + str(test_data.ingest_messages) + "</td></tr>" + info += "<tr><td>Indexed Files Count:</td>" + info += "<td>" + str(test_data.indexed_files) + "</td></tr>" + info += "<tr><td>Indexed File Chunks Count:</td>" + info += "<td>" + str(test_data.indexed_chunks) + "</td></tr>" + info += "<tr><td>Out Of Disk Space:\ + <p style='font-size: 11px;'>(will skew other test results)</p></td>" + info += "<td>" + str(len(search_log_set("autopsy", "Stopping ingest due to low disk space on disk", test_data))) + "</td></tr>" +# info += "<tr><td>TSK Objects Count:</td>" +# info += "<td>" + str(test_data.db_diff_results.output_objs) + "</td></tr>" +# info += "<tr><td>Artifacts Count:</td>" +# info += "<td>" + str(test_data.db_diff_results.output_artifacts)+ "</td></tr>" +# info += "<tr><td>Attributes Count:</td>" +# info += "<td>" + str(test_data.db_diff_results.output_attrs) + "</td></tr>" + info += "</table>\ + </div>" + # For all the general print statements in the test_config + output = "<div id='general'>\ + 
<h2><a name='" + test_data.image_name + "-general'>General Output</a></h2>\ + <hr color='#282828'>" + # For each printout in the test_config's list + for out in test_data.printout: + output += "<p>" + out + "</p>" + # If there was a \n it probably means we want a <br /> in the html + if "\n" in out: + output += "<br />" + output += "</div>" + + html.write(title) + html.write(errors) + html.write(info) + html.write(logs) + html.write(output) + + def write_html_head(html_log): + """Write the top of the HTML log file. + + Args: + html_log: a pathto_File, the global HTML log + """ + with open(str(html_log), "a") as html: + head = "<html>\ + <head>\ + <title>AutopsyTesttest_config Output</title>\ + </head>\ + <style type='text/css'>\ + body { font-family: 'Courier New'; font-size: 12px; }\ + h1 { background: #444; margin: 0px auto; padding: 0px; color: #FFF; border: 1px solid #000; font-family: Tahoma; text-align: center; }\ + h1 span { font-size: 12px; font-weight: 100; }\ + h2 { font-family: Tahoma; padding: 0px; margin: 0px; }\ + hr { width: 100%; height: 1px; border: none; margin-top: 10px; margin-bottom: 10px; }\ + #errors { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\ + #errors1 { background: #CC0000; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\ + #info { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\ + #general { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\ + #logs { background: #CCCCCC; border: 1px solid #282828; color: #282828; padding: 10px; margin: 20px; }\ + #errors p, #info p, #general p, #logs p { pading: 0px; margin: 0px; margin-left: 5px; }\ + #info table td { color: ##282828; font-size: 12px; min-width: 225px; }\ + #logs a { color: ##282828; }\ + </style>\ + <body>" + html.write(head) + + def write_html_foot(html_log): + """Write the bottom of the HTML log file. + + Args: + html_log: a pathto_File, the global HTML log + """ + with open(html_log, "a") as html: + head = "</body></html>" + html.write(head) + + def html_add_images(html_log, full_image_names): + """Add all the image names to the HTML log. 
+ + Args: + full_image_names: a listof_String, each representing an image name + html_log: a pathto_File, the global HTML log + """ + # If the file doesn't exist yet, this is the first test_config to run for + # this test, so we need to make the start of the html log + if not file_exists(html_log): + Reports.write_html_head(html_log) + with open(html_log, "a") as html: + links = [] + for full_name in full_image_names: + name = get_image_name(full_name) + links.append("<a href='#" + name + "(0)'>" + name + "</a>") + html.write("<p align='center'>" + (" | ".join(links)) + "</p>") + + def _generate_csv(csv_path, test_data): + """Generate the CSV log file""" + # If the CSV file hasn't already been generated, this is the + # first run, and we need to add the column names + if not file_exists(csv_path): + Reports.csv_header(csv_path) + # Now add on the fields to a new row + with open(csv_path, "a") as csv: + # Variables that need to be written + vars = [] + vars.append( test_data.image_file ) + vars.append( test_data.image_name ) + vars.append( test_data.main_config.output_dir ) + vars.append( socket.gethostname() ) + vars.append( test_data.autopsy_version ) + vars.append( test_data.heap_space ) + vars.append( test_data.start_date ) + vars.append( test_data.end_date ) + vars.append( test_data.total_test_time ) + vars.append( test_data.total_ingest_time ) + vars.append( test_data.service_times ) + vars.append( str(len(get_exceptions(test_data))) ) + vars.append( str(Reports._get_num_memory_errors("autopsy", test_data)) ) + vars.append( str(Reports._get_num_memory_errors("tika", test_data)) ) + vars.append( str(Reports._get_num_memory_errors("solr", test_data)) ) + vars.append( str(len(search_log_set("autopsy", "TskCoreException", test_data))) ) + vars.append( str(len(search_log_set("autopsy", "TskDataException", test_data))) ) + vars.append( str(test_data.ingest_messages) ) + vars.append( str(test_data.indexed_files) ) + vars.append( str(test_data.indexed_chunks) ) + vars.append( str(len(search_log_set("autopsy", "Stopping ingest due to low disk space on disk", test_data))) ) +# vars.append( str(test_data.db_diff_results.output_objs) ) +# vars.append( str(test_data.db_diff_results.output_artifacts) ) +# vars.append( str(test_data.db_diff_results.output_objs) ) + vars.append( make_local_path("gold", test_data.image_name, DB_FILENAME) ) +# vars.append( test_data.db_diff_results.get_artifact_comparison() ) +# vars.append( test_data.db_diff_results.get_attribute_comparison() ) + vars.append( make_local_path("gold", test_data.image_name, "standard.html") ) + vars.append( str(test_data.html_report_passed) ) + vars.append( test_data.ant_to_string() ) + # Join it together with a ", " + output = "|".join(vars) + output += "\n" + # Write to the log! 
+ csv.write(output) + + def csv_header(csv_path): + """Generate the CSV column names.""" + with open(csv_path, "w") as csv: + titles = [] + titles.append("Image Path") + titles.append("Image Name") + titles.append("Output test_config Directory") + titles.append("Host Name") + titles.append("Autopsy Version") + titles.append("Heap Space Setting") + titles.append("Test Start Date") + titles.append("Test End Date") + titles.append("Total Test Time") + titles.append("Total Ingest Time") + titles.append("Service Times") + titles.append("Autopsy Exceptions") + titles.append("Autopsy OutOfMemoryErrors/Exceptions") + titles.append("Tika OutOfMemoryErrors/Exceptions") + titles.append("Solr OutOfMemoryErrors/Exceptions") + titles.append("TskCoreExceptions") + titles.append("TskDataExceptions") + titles.append("Ingest Messages Count") + titles.append("Indexed Files Count") + titles.append("Indexed File Chunks Count") + titles.append("Out Of Disk Space") +# titles.append("Tsk Objects Count") +# titles.append("Artifacts Count") +# titles.append("Attributes Count") + titles.append("Gold Database Name") +# titles.append("Artifacts Comparison") +# titles.append("Attributes Comparison") + titles.append("Gold Report Name") + titles.append("Report Comparison") + titles.append("Ant Command Line") + output = "|".join(titles) + output += "\n" + csv.write(output) + + def _get_num_memory_errors(type, test_data): + """Get the number of OutOfMemory errors and Exceptions. + + Args: + type: a String representing the type of log to check. + test_data: the TestData to examine. + """ + return (len(search_log_set(type, "OutOfMemoryError", test_data)) + + len(search_log_set(type, "OutOfMemoryException", test_data))) + +class Logs(object): + + def generate_log_data(test_data): + """Find and handle relevent data from the Autopsy logs. + + Args: + test_data: the TestData whose logs to examine + """ + Logs._generate_common_log(test_data) + try: + Logs._fill_ingest_data(test_data) + except Exception as e: + Errors.print_error("Error: Unknown fatal error when filling test_config data.") + Errors.print_error(str(e) + "\n") + logging.critical(traceback.format_exc()) + # If running in verbose mode (-v) + if test_data.main_config.args.verbose: + errors = Logs._report_all_errors() + okay = "No warnings or errors in any log files." + print_report(errors, "VERBOSE", okay) + + def _generate_common_log(test_data): + """Generate the common log, the log of all exceptions and warnings from + each log file generated by Autopsy. 
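+
+        (Routing, per the loop below: lines starting with "Exception", "Error",
+        or "SEVERE" go into the common log, everything else into the warning
+        log. The common log is then sorted into sorted_log with the external
+        "sort" command, which is assumed to be on the PATH.)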
+ + Args: + test_data: the TestData to generate a log for + """ + try: + logs_path = test_data.logs_dir + common_log = codecs.open(test_data.common_log_path, "w", "utf_8") + warning_log = codecs.open(test_data.warning_log, "w", "utf_8") + common_log.write("--------------------------------------------------\n") + common_log.write(test_data.image_name + "\n") + common_log.write("--------------------------------------------------\n") + rep_path = make_local_path(test_data.main_config.output_dir) + rep_path = rep_path.replace("\\\\", "\\") + for file in os.listdir(logs_path): + log = codecs.open(make_path(logs_path, file), "r", "utf_8") + for line in log: + line = line.replace(rep_path, "test_data") + if line.startswith("Exception"): + common_log.write(file +": " + line) + elif line.startswith("Error"): + common_log.write(file +": " + line) + elif line.startswith("SEVERE"): + common_log.write(file +":" + line) + else: + warning_log.write(file +": " + line) + log.close() + common_log.write("\n") + common_log.close() + print(test_data.sorted_log) + srtcmdlst = ["sort", test_data.common_log_path, "-o", test_data.sorted_log] + subprocess.call(srtcmdlst) + except (OSError, IOError) as e: + Errors.print_error("Error: Unable to generate the common log.") + Errors.print_error(str(e) + "\n") + Errors.print_error(traceback.format_exc()) + logging.critical(traceback.format_exc()) + + def _fill_ingest_data(test_data): + """Fill the TestDatas variables that require the log files. + + Args: + test_data: the TestData to modify + """ + try: + # Open autopsy.log.0 + log_path = make_path(test_data.logs_dir, "autopsy.log.0") + log = open(log_path) + + # Set the TestData start time based off the first line of autopsy.log.0 + # *** If logging time format ever changes this will break *** + test_data.start_date = log.readline().split(" org.")[0] + + # Set the test_data ending time based off the "create" time (when the file was copied) + test_data.end_date = time.ctime(os.path.getmtime(log_path)) + except IOError as e: + Errors.print_error("Error: Unable to open autopsy.log.0.") + Errors.print_error(str(e) + "\n") + logging.warning(traceback.format_exc()) + # Start date must look like: "Jul 16, 2012 12:57:53 PM" + # End date must look like: "Mon Jul 16 13:02:42 2012" + # *** If logging time format ever changes this will break *** + start = datetime.datetime.strptime(test_data.start_date, "%b %d, %Y %I:%M:%S %p") + end = datetime.datetime.strptime(test_data.end_date, "%a %b %d %H:%M:%S %Y") + test_data.total_test_time = str(end - start) + + try: + # Set Autopsy version, heap space, ingest time, and service times + + version_line = search_logs("INFO: Application name: Autopsy, version:", test_data)[0] + test_data.autopsy_version = get_word_at(version_line, 5).rstrip(",") + + test_data.heap_space = search_logs("Heap memory usage:", test_data)[0].rstrip().split(": ")[1] + + ingest_line = search_logs("Ingest (including enqueue)", test_data)[0] + test_data.total_ingest_time = get_word_at(ingest_line, 6).rstrip() + + message_line = search_log_set("autopsy", "Ingest messages count:", test_data)[0] + test_data.ingest_messages = int(message_line.rstrip().split(": ")[2]) + + files_line = search_log_set("autopsy", "Indexed files count:", test_data)[0] + test_data.indexed_files = int(files_line.rstrip().split(": ")[2]) + + chunks_line = search_log_set("autopsy", "Indexed file chunks count:", test_data)[0] + test_data.indexed_chunks = int(chunks_line.rstrip().split(": ")[2]) + except (OSError, IOError) as e: + 
Errors.print_error("Error: Unable to find the required information to fill test_config data.") + Errors.print_error(str(e) + "\n") + logging.critical(traceback.format_exc()) + print(traceback.format_exc()) + try: + service_lines = search_log("autopsy.log.0", "to process()", test_data) + service_list = [] + for line in service_lines: + words = line.split(" ") + # Kind of forcing our way into getting this data + # If this format changes, the tester will break + i = words.index("secs.") + times = words[i-4] + " " + times += words[i-3] + " " + times += words[i-2] + " " + times += words[i-1] + " " + times += words[i] + service_list.append(times) + test_data.service_times = "; ".join(service_list) + except (OSError, IOError) as e: + Errors.print_error("Error: Unknown fatal error when finding service times.") + Errors.print_error(str(e) + "\n") + logging.critical(traceback.format_exc()) + + def _report_all_errors(): + """Generate a list of all the errors found in the common log. + + Returns: + a listof_String, the errors found in the common log + """ + try: + return get_warnings() + get_exceptions() + except (OSError, IOError) as e: + Errors.print_error("Error: Unknown fatal error when reporting all errors.") + Errors.print_error(str(e) + "\n") + logging.warning(traceback.format_exc()) + + def search_common_log(string, test_data): + """Search the common log for any instances of a given string. + + Args: + string: the String to search for. + test_data: the TestData that holds the log to search. + + Returns: + a listof_String, all the lines that the string is found on + """ + results = [] + log = codecs.open(test_data.common_log_path, "r", "utf_8") + for line in log: + if string in line: + results.append(line) + log.close() + return results + + +def print_report(errors, name, okay): + """Print a report with the specified information. + + Args: + errors: a listof_String, the errors to report. + name: a String, the name of the report. + okay: the String to print when there are no errors. + """ + if errors: + Errors.print_error("--------< " + name + " >----------") + for error in errors: + Errors.print_error(str(error)) + Errors.print_error("--------< / " + name + " >--------\n") + else: + Errors.print_out("-----------------------------------------------------------------") + Errors.print_out("< " + name + " - " + okay + " />") + Errors.print_out("-----------------------------------------------------------------\n") + + +def get_exceptions(test_data): + """Get a list of the exceptions in the autopsy logs. + + Args: + test_data: the TestData to use to find the exceptions. + Returns: + a listof_String, the exceptions found in the logs. + """ + exceptions = [] + logs_path = test_data.logs_dir + results = [] + for file in os.listdir(logs_path): + if "autopsy.log" in file: + log = codecs.open(make_path(logs_path, file), "r", "utf_8") + ex = re.compile("\SException") + er = re.compile("\SError") + for line in log: + if ex.search(line) or er.search(line): + exceptions.append(line) + log.close() + return exceptions + +def get_warnings(test_data): + """Get a list of the warnings listed in the common log. + + Args: + test_data: the TestData to use to find the warnings + + Returns: + listof_String, the warnings found. + """ + warnings = [] + common_log = codecs.open(test_data.warning_log, "r", "utf_8") + for line in common_log: + if "warning" in line.lower(): + warnings.append(line) + common_log.close() + return warnings + +def copy_logs(test_data): + """Copy the Autopsy generated logs to output directory. 
+ + Args: + test_data: the TestData whose logs will be copied + """ + try: + log_dir = os.path.join("..", "..", "Testing","build","test","qa-functional","work","userdir0","var","log") + shutil.copytree(log_dir, test_data.logs_dir) + except OSError as e: + printerror(test_data,"Error: Failed to copy the logs.") + printerror(test_data,str(e) + "\n") + logging.warning(traceback.format_exc()) + +def setDay(): + global Day + Day = int(strftime("%d", localtime())) + +def getLastDay(): + return Day + +def getDay(): + return int(strftime("%d", localtime())) + +def newDay(): + return getLastDay() != getDay() + +#------------------------------------------------------------# +# Exception classes to manage "acceptable" thrown exceptions # +# versus unexpected and fatal exceptions # +#------------------------------------------------------------# + +class FileNotFoundException(Exception): + """ + If a file cannot be found by one of the helper functions, + they will throw a FileNotFoundException unless the purpose + is to return False. + """ + def __init__(self, file): + self.file = file + self.strerror = "FileNotFoundException: " + file + + def print_error(self): + Errors.print_error("Error: File could not be found at:") + Errors.print_error(self.file + "\n") + + def error(self): + error = "Error: File could not be found at:\n" + self.file + "\n" + return error + +class DirNotFoundException(Exception): + """ + If a directory cannot be found by a helper function, + it will throw this exception + """ + def __init__(self, dir): + self.dir = dir + self.strerror = "DirNotFoundException: " + dir + + def print_error(self): + Errors.print_error("Error: Directory could not be found at:") + Errors.print_error(self.dir + "\n") + + def error(self): + error = "Error: Directory could not be found at:\n" + self.dir + "\n" + return error + + +class Errors: + """A class used to manage error reporting. + + Attributes: + printout: a listof_String, the non-error messages that were printed + printerror: a listof_String, the error messages that were printed + email_body: a String, the body of the report email + email_msg_prefix: a String, the prefix for lines added to the email + email_attchs: a listof_pathto_File, the files to be attached to the + report email + """ + printout = [] + printerror = [] + email_body = "" + email_msg_prefix = "Configuration" + email_attachs = [] + + def set_testing_phase(image_name): + """Change the email message prefix to be the given testing phase. + + Args: + image_name: a String, representing the current image being tested + """ + Errors.email_msg_prefix = image_name + + def print_out(msg): + """Print out an informational message. + + Args: + msg: a String, the message to be printed + """ + print(msg) + Errors.printout.append(msg) + + def print_error(msg): + """Print out an error message. + + Args: + msg: a String, the error message to be printed. + """ + print(msg) + Errors.printerror.append(msg) + + def clear_print_logs(): + """Reset the image-specific attributes of the Errors class.""" + Errors.printout = [] + Errors.printerror = [] + + def add_email_msg(msg): + """Add the given message to the body of the report email. 
+ + Args: + msg: a String, the message to be added to the email + """ + Errors.email_body += Errors.email_msg_prefix + ":" + msg + + def add_email_attachment(path): + """Add the given file to be an attachment for the report email + + Args: + file: a pathto_File, the file to add + """ + Errors.email_attachs.append(path) + + +class DiffResults(object): + """Container for the results of the database diff tests. + + Stores artifact, object, and attribute counts and comparisons generated by + TskDbDiff. + + Attributes: + gold_attrs: a Nat, the number of gold attributes + output_attrs: a Nat, the number of output attributes + gold_objs: a Nat, the number of gold objects + output_objs: a Nat, the number of output objects + artifact_comp: a listof_String, describing the differences + attribute_comp: a listof_String, describing the differences + passed: a boolean, did the diff pass? + """ + def __init__(self, tsk_diff): + """Inits a DiffResults + + Args: + tsk_diff: a TskDBDiff + """ + self.gold_attrs = tsk_diff.gold_attributes + self.output_attrs = tsk_diff.autopsy_attributes + self.gold_objs = tsk_diff.gold_objects + self.output_objs = tsk_diff.autopsy_objects + self.artifact_comp = tsk_diff.artifact_comparison + self.attribute_comp = tsk_diff.attribute_comparison + self.gold_artifacts = len(tsk_diff.gold_artifacts) + self.output_artifacts = len(tsk_diff.autopsy_artifacts) + self.passed = tsk_diff.passed + + def get_artifact_comparison(self): + if not self.artifact_comp: + return "All counts matched" + else: + return "; ".join(self.artifact_comp) + + def get_attribute_comparison(self): + if not self.attribute_comp: + return "All counts matched" + list = [] + for error in self.attribute_comp: + list.append(error) + return ";".join(list) + + +#-------------------------------------------------------------# +# Parses argv and stores booleans to match command line input # +#-------------------------------------------------------------# +class Args(object): + """A container for command line options and arguments. 
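+
+    Example invocations (the script name is illustrative; the flags are the
+    ones parse() below accepts):
+        python3 regression.py -l config.xml --email
+        python3 regression.py -f C:\images\image.img -r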
+
+    Attributes:
+        single: a boolean indicating whether to run in single file mode
+        single_file: an Image to run the test on
+        rebuild: a boolean indicating whether to run in rebuild mode
+        list: a boolean indicating a config file was specified
+        unallocated: a boolean indicating unallocated space should be ignored
+        ignore: a boolean indicating the input directory should be ignored
+        keep: a boolean indicating whether to keep the SOLR index
+        verbose: a boolean indicating whether verbose output should be printed
+        exception: a boolean indicating whether to print only errors that
+        contain exception_string
+        exception_string: a String representing an exception name
+        fr: a boolean indicating whether to skip downloading the gold standard
+        images (force run)
+        email_enabled: a boolean indicating whether email reporting is enabled
+    """
+    def __init__(self):
+        self.single = False
+        self.single_file = ""
+        self.rebuild = False
+        self.list = False
+        self.config_file = ""
+        self.unallocated = False
+        self.ignore = False
+        self.keep = False
+        self.verbose = False
+        self.exception = False
+        self.exception_string = ""
+        self.fr = False
+        self.email_enabled = False
+
+    def parse(self):
+        """Get the command line arguments and parse them."""
+        nxtproc = []
+        nxtproc.append("python3")
+        nxtproc.append(sys.argv.pop(0))
+        while sys.argv:
+            arg = sys.argv.pop(0)
+            nxtproc.append(arg)
+            if(arg == "-f"):
+                #try: @@@ Commented out until a more specific except statement is added
+                arg = sys.argv.pop(0)
+                print("Running on a single file:")
+                print(path_fix(arg) + "\n")
+                self.single = True
+                self.single_file = path_fix(arg)
+                #except:
+                #    print("Error: No single file given.\n")
+                #    return False
+            elif(arg == "-r" or arg == "--rebuild"):
+                print("Running in rebuild mode.\n")
+                self.rebuild = True
+            elif(arg == "-l" or arg == "--list"):
+                try:
+                    arg = sys.argv.pop(0)
+                    nxtproc.append(arg)
+                    print("Running from configuration file:")
+                    print(arg + "\n")
+                    self.list = True
+                    self.config_file = arg
+                except:
+                    print("Error: No configuration file given.\n")
+                    return False
+            elif(arg == "-u" or arg == "--unallocated"):
+                print("Ignoring unallocated space.\n")
+                self.unallocated = True
+            elif(arg == "-k" or arg == "--keep"):
+                print("Keeping the Solr index.\n")
+                self.keep = True
+            elif(arg == "-v" or arg == "--verbose"):
+                print("Running in verbose mode:")
+                print("Printing all thrown exceptions.\n")
+                self.verbose = True
+            elif(arg == "-e" or arg == "--exception"):
+                try:
+                    arg = sys.argv.pop(0)
+                    nxtproc.append(arg)
+                    print("Running in exception mode: ")
+                    print("Printing all exceptions with the string '" + arg + "'\n")
+                    self.exception = True
+                    self.exception_string = arg
+                except:
+                    print("Error: No exception string given.")
+            elif arg == "-h" or arg == "--help":
+                print(usage())
+                return False
+            elif arg == "-fr" or arg == "--forcerun":
+                print("Not downloading new images")
+                self.fr = True
+            elif arg == "--email":
+                self.email_enabled = True
+            else:
+                print(usage())
+                return False
+        # Return whether the args were successfully parsed
+        return self._sanity_check()
+
+    def _sanity_check(self):
+        """Check to make sure there are no conflicting arguments and the
+        specified files exist.
+
+####
+# Helper Functions
+####
+def search_logs(string, test_data):
+    """Search through all the known log files for a given string.
+
+    Args:
+        string: the String to search for.
+        test_data: the TestData that holds the logs to search.
+
+    Returns:
+        a listof_String, the lines that contained the given String.
+    """
+    logs_path = test_data.logs_dir
+    results = []
+    for file in os.listdir(logs_path):
+        log = codecs.open(make_path(logs_path, file), "r", "utf_8")
+        for line in log:
+            if string in line:
+                results.append(line)
+        log.close()
+    return results
+
+def search_log(log, string, test_data):
+    """Search the given log for any instances of a given string.
+
+    Args:
+        log: a pathto_File, the log to search in
+        string: the String to search for.
+        test_data: the TestData that holds the log to search.
+
+    Returns:
+        a listof_String, all the lines that the string is found on
+    """
+    logs_path = make_path(test_data.logs_dir, log)
+    try:
+        results = []
+        log = codecs.open(logs_path, "r", "utf_8")
+        for line in log:
+            if string in line:
+                results.append(line)
+        log.close()
+        if results:
+            return results
+    except IOError:
+        raise FileNotFoundException(logs_path)
+
+# Search through all the logs of the given type
+# Types include autopsy, tika, and solr
+def search_log_set(type, string, test_data):
+    """Search through all logs of the given type for the given string.
+
+    Args:
+        type: the type of log to search in.
+        string: the String to search for.
+        test_data: the TestData containing the logs to search.
+
+    Returns:
+        a listof_String, the lines on which the String was found.
+    """
+    logs_path = test_data.logs_dir
+    results = []
+    for file in os.listdir(logs_path):
+        if type in file:
+            log = codecs.open(make_path(logs_path, file), "r", "utf_8")
+            for line in log:
+                if string in line:
+                    results.append(line)
+            log.close()
+    return results
+
+
+def clear_dir(dir):
+    """Clears all files from a directory and remakes it.
+
+    Args:
+        dir: a pathto_Dir, the directory to clear
+    """
+    try:
+        if dir_exists(dir):
+            shutil.rmtree(dir)
+        os.makedirs(dir)
+        return True
+    except OSError as e:
+        print("Error: Cannot clear the given directory:")
+        print(dir + "\n")
+        print(str(e))
+        return False
+
+def del_dir(dir):
+    """Delete the given directory.
+
+    Args:
+        dir: a pathto_Dir, the directory to delete
+    """
+    try:
+        if dir_exists(dir):
+            shutil.rmtree(dir)
+        return True
+    except OSError:
+        print("Error: Cannot delete the given directory:")
+        print(dir + "\n")
+        return False
+
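+# Editorial sketch: a typical call site for the search helpers above greps
+# every Autopsy log for exceptions; test_data is hypothetical here and would
+# come from the harness.
+def _demo_count_exceptions(test_data):
+    # search_log_set filters log files by substring, e.g. "autopsy" matches
+    # the autopsy.log.N family.
+    return len(search_log_set("autopsy", "Exception", test_data))
+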
+def get_file_in_dir(dir, ext):
+    """Returns the first file in the given directory with the given extension.
+
+    Args:
+        dir: a pathto_Dir, the directory to search
+        ext: a String, the extension to search for
+
+    Returns:
+        pathto_File, the file that was found
+    """
+    try:
+        files = os.listdir(dir)
+    except OSError:
+        raise DirNotFoundException(dir)
+    for file in files:
+        if file.endswith(ext):
+            return make_path(dir, file)
+    # If nothing has been found, raise an exception
+    raise FileNotFoundException(dir)
+
+def find_file_in_dir(dir, name, ext):
+    """Find the file with the given name in the given directory.
+
+    Args:
+        dir: a pathto_Dir, the directory to search
+        name: a String, the basename of the file to search for
+        ext: a String, the extension of the file to search for
+    """
+    try:
+        files = os.listdir(dir)
+    except OSError:
+        raise DirNotFoundException(dir)
+    for file in files:
+        if file.startswith(name):
+            if file.endswith(ext):
+                return make_path(dir, file)
+    raise FileNotFoundException(dir)
+
+
+class OS:
+    LINUX, MAC, WIN, CYGWIN = range(4)
+
+
+if __name__ == "__main__":
+    global SYS
+    if _platform == "linux" or _platform == "linux2":
+        SYS = OS.LINUX
+    elif _platform == "darwin":
+        SYS = OS.MAC
+    elif _platform == "win32":
+        SYS = OS.WIN
+    elif _platform == "cygwin":
+        SYS = OS.CYGWIN
+
+    if SYS is OS.WIN or SYS is OS.CYGWIN:
+        main()
+    else:
+        print("We only support Windows and Cygwin at this time.")
diff --git a/test/script/srcupdater.py b/test/script/srcupdater.py
index eab46604b610e46ead6f9d60107cbfd5d6ad5c5e..c0194654a63a076437bfc5751787bed6313b78e9 100644
--- a/test/script/srcupdater.py
+++ b/test/script/srcupdater.py
@@ -1,214 +1,214 @@
-import codecs
-import datetime
-import logging
-import os
-import re
-import shutil
-import socket
-import sqlite3
-import subprocess
-import sys
-from sys import platform as _platform
-import time
-import traceback
-import xml
-from xml.dom.minidom import parse, parseString
-import Emailer
-from regression_utils import *
-
-def compile(errore, attachli, parsedin):
-    global to
-    global server
-    global subj
-    global email_enabled
-    global redo
-    global tryredo
-    global failedbool
-    global errorem
-    errorem = errore
-    global attachl
-    attachl = attachli
-    global passed
-    global parsed
-    parsed = parsedin
-    passed = True
-    tryredo = False
-    redo = True
-    while(redo):
-        passed = True
-        if(passed):
-            gitPull("sleuthkit")
-        if(passed):
-            vsBuild()
-            print("TSK")
-        if(passed):
-            gitPull("autopsy")
-        if(passed):
-            antBuild("datamodel", False)
-            print("DataModel")
-        if(passed):
-            antBuild("autopsy", True)
-            print("Aut")
-        if(passed):
-            redo = False
-        else:
-            print("Compile Failed")
-            time.sleep(3600)
-    attachl = []
-    errorem = "The test standard didn't match the gold standard.\n"
-    failedbool = False
-    if(tryredo):
-        errorem = ""
-        errorem += "Rebuilt properly.\n"
-        if email_enabled:
-            Emailer.send_email(to, server, subj, errorem, attachl)
-        attachl = []
-        passed = True
-
-#Pulls from git
-def gitPull(TskOrAutopsy):
-    global SYS
-    global errorem
-    global attachl
-    ccwd = ""
-    gppth = make_local_path("..", "GitPullOutput" + TskOrAutopsy + ".txt")
-    attachl.append(gppth)
-    gpout = open(gppth, 'a')
-    toPull = "https://www.github.com/sleuthkit/" + TskOrAutopsy
-    call = ["git", "pull", toPull]
-    if TskOrAutopsy == "sleuthkit":
-        ccwd = os.path.join("..", "..", "..", "sleuthkit")
-    else:
-        ccwd = os.path.join("..", "..")
-    subprocess.call(call, stdout=sys.stdout, cwd=ccwd)
-    gpout.close()
-
-
-#Builds TSK as a win32 applicatiion
-def vsBuild():
-    global redo
-    global tryredo
-    global passed
-    global parsed
-    #Please ensure that the current working directory is $autopsy/testing/script
-    oldpath = os.getcwd()
-    os.chdir(os.path.join("..", "..", "..","sleuthkit", "win32"))
-    vs = []
-    vs.append("/cygdrive/c/windows/microsoft.NET/framework/v4.0.30319/MSBuild.exe")
-    vs.append(os.path.join("Tsk-win.sln"))
-    vs.append("/p:configuration=release")
-    vs.append("/p:platform=x64")
-    vs.append("/t:clean")
-    vs.append("/t:rebuild")
-    print(vs)
-    VSpth = make_local_path("..", "VSOutput.txt")
-    VSout = open(VSpth, 'a')
-    subprocess.call(vs, stdout=VSout)
-    VSout.close()
-    os.chdir(oldpath)
-    chk = os.path.join("..", "..", "..","sleuthkit", "win32", "x64", "Release", "libtsk_jni.dll")
-    if not os.path.exists(chk):
-        print("path doesn't exist")
-        global errorem
-        global attachl
-        global email_enabled
-        if(not tryredo):
-            errorem += "LIBTSK C++ failed to build.\n"
-            attachl.append(VSpth)
-            if email_enabled:
-                Emailer.send_email(parsed, errorem, attachl, False)
-        tryredo = True
-        passed = False
-        redo = True
-
-
-
-#Builds Autopsy or the Datamodel
-def antBuild(which, Build):
-    print("building: ", which)
-    global redo
-    global passed
-    global tryredo
-    global parsed
-    directory = os.path.join("..", "..")
-    ant = []
-    if which == "datamodel":
-        directory = os.path.join("..", "..", "..", "sleuthkit", "bindings", "java")
-    ant.append("ant")
-    ant.append("-f")
-    ant.append(directory)
-    ant.append("clean")
-    if(Build):
-        ant.append("build")
-    else:
-        ant.append("dist")
-    antpth = make_local_path("..", "ant" + which + "Output.txt")
-    antout = open(antpth, 'a')
-    succd = subprocess.call(ant, stdout=antout)
-    antout.close()
-    global errorem
-    global attachl
-    global email_enabled
-    global to
-    global subj
-    global server
-    if which == "datamodel":
-        chk = os.path.join("..", "..", "..","sleuthkit", "bindings", "java", "dist", "TSK_DataModel.jar")
-        try:
-            open(chk)
-        except IOError as e:
-            if(not tryredo):
-                errorem += "DataModel Java build failed.\n"
-                attachl.append(antpth)
-                if email_enabled:
-                    Emailer.send_email(to, server, subj, errorem, attachl)
-            passed = False
-            tryredo = True
-    elif (succd != 0 and (not tryredo)):
-        errorem += "Autopsy build failed.\n"
-        attachl.append(antpth)
-        Emailer.send_email(to, server, subj, errorem, attachl)
-        tryredo = True
-    elif (succd != 0):
-        passed = False
-
-
-def main():
-    global email_enabled
-    global to
-    global server
-    global subj
-    errore = ""
-    attachli = []
-    config_file = ""
-    arg = sys.argv.pop(0)
-    arg = sys.argv.pop(0)
-    config_file = arg
-    parsedin = parse(config_file)
-    try:
-        to = parsedin.getElementsByTagName("email")[0].getAttribute("value").encode().decode("utf_8")
-        server = parsedin.getElementsByTagName("mail_server")[0].getAttribute("value").encode().decode("utf_8")
-        subj = parsedin.getElementsByTagName("subject")[0].getAttribute("value").encode().decode("utf_8")
-    except Exception:
-        email_enabled = False
-    # email_enabled = (to is not None) and (server is not None) and (subj is not None)
-    email_enabled = False
-    compile(errore, attachli, parsedin)
-
-class OS:
-    LINUX, MAC, WIN, CYGWIN = range(4)
-if __name__ == "__main__":
-    global SYS
-    if _platform == "linux" or _platform == "linux2":
-        SYS = OS.LINUX
-    elif _platform == "darwin":
-        SYS = OS.MAC
-    elif _platform == "win32":
-        SYS = OS.WIN
-    elif _platform == "cygwin":
-        SYS = OS.CYGWIN
-
-    if SYS is OS.WIN or SYS is OS.CYGWIN:
-        main()
-    else:
-        print("We only support Windows and Cygwin at this time.")
+import codecs
+import datetime
+import logging
+import os
+import re
+import shutil
+import socket
+import sqlite3
+import subprocess
+import sys
+from sys import platform as _platform
+import time
+import traceback
+import xml
+from xml.dom.minidom import parse, parseString
+import Emailer
+from regression_utils import *
+
+def compile(errore, attachli, parsedin):
+    global to
+    global server
+    global subj
+    global email_enabled
+    global redo
+    global tryredo
+    global failedbool
+    global errorem
+    errorem = errore
+    global attachl
+    attachl = attachli
+    global passed
+    global parsed
+    parsed = parsedin
+    passed = True
+    tryredo = False
+    redo = True
+    while(redo):
+        passed = True
+        if(passed):
+            gitPull("sleuthkit")
+        if(passed):
+            vsBuild()
+            print("TSK")
+        if(passed):
+            gitPull("autopsy")
+        if(passed):
+            antBuild("datamodel", False)
+            print("DataModel")
+        if(passed):
+            antBuild("autopsy", True)
+            print("Aut")
+        if(passed):
+            redo = False
+        else:
+            print("Compile Failed")
+            time.sleep(3600)
+    attachl = []
+    errorem = "The test standard didn't match the gold standard.\n"
+    failedbool = False
+    if(tryredo):
+        errorem = ""
+        errorem += "Rebuilt properly.\n"
+        if email_enabled:
+            Emailer.send_email(to, server, subj, errorem, attachl)
+        attachl = []
+        passed = True
+
+#Pulls from git
+def gitPull(TskOrAutopsy):
+    global SYS
+    global errorem
+    global attachl
+    ccwd = ""
+    gppth = make_local_path("..", "GitPullOutput" + TskOrAutopsy + ".txt")
+    attachl.append(gppth)
+    gpout = open(gppth, 'a')
+    toPull = "https://www.github.com/sleuthkit/" + TskOrAutopsy
+    call = ["git", "pull", toPull]
+    if TskOrAutopsy == "sleuthkit":
+        ccwd = os.path.join("..", "..", "..", "sleuthkit")
+    else:
+        ccwd = os.path.join("..", "..")
+    subprocess.call(call, stdout=gpout, cwd=ccwd)
+    gpout.close()
+
+
+#Builds TSK (the win32 solution, x64 Release) with MSBuild
+def vsBuild():
+    global redo
+    global tryredo
+    global passed
+    global parsed
+    #Please ensure that the current working directory is $autopsy/testing/script
+    oldpath = os.getcwd()
+    os.chdir(os.path.join("..", "..", "..","sleuthkit", "win32"))
+    vs = []
+    vs.append("/cygdrive/c/windows/microsoft.NET/framework/v4.0.30319/MSBuild.exe")
+    vs.append(os.path.join("Tsk-win.sln"))
+    vs.append("/p:configuration=release")
+    vs.append("/p:platform=x64")
+    vs.append("/t:clean")
+    vs.append("/t:rebuild")
+    print(vs)
+    VSpth = make_local_path("..", "VSOutput.txt")
+    VSout = open(VSpth, 'a')
+    subprocess.call(vs, stdout=VSout)
+    VSout.close()
+    os.chdir(oldpath)
+    chk = os.path.join("..", "..", "..","sleuthkit", "win32", "x64", "Release", "libtsk_jni.dll")
+    if not os.path.exists(chk):
+        print("path doesn't exist")
+        global errorem
+        global attachl
+        global email_enabled
+        if(not tryredo):
+            errorem += "LIBTSK C++ failed to build.\n"
+            attachl.append(VSpth)
+            if email_enabled:
+                Emailer.send_email(parsed, errorem, attachl, False)
+        tryredo = True
+        passed = False
+        redo = True
+
+
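+# Editorial sketch: gitPull, vsBuild, and antBuild all repeat the same
+# open-log/run/close pattern; a shared helper (hypothetical, unused by this
+# script) would keep the logging consistent.
+def _run_logged(cmd, log_path, cwd=None):
+    log = open(log_path, 'a')
+    try:
+        # Capture stderr too, so build failures end up in the attachment.
+        return subprocess.call(cmd, stdout=log, stderr=log, cwd=cwd)
+    finally:
+        log.close()
+
+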
"java", "dist", "TSK_DataModel.jar") + try: + open(chk) + except IOError as e: + if(not tryredo): + errorem += "DataModel Java build failed.\n" + attachl.append(antpth) + if email_enabled: + Emailer.send_email(to, server, subj, errorem, attachl) + passed = False + tryredo = True + elif (succd != 0 and (not tryredo)): + errorem += "Autopsy build failed.\n" + attachl.append(antpth) + Emailer.send_email(to, server, subj, errorem, attachl) + tryredo = True + elif (succd != 0): + passed = False + + +def main(): + global email_enabled + global to + global server + global subj + errore = "" + attachli = [] + config_file = "" + arg = sys.argv.pop(0) + arg = sys.argv.pop(0) + config_file = arg + parsedin = parse(config_file) + try: + to = parsedin.getElementsByTagName("email")[0].getAttribute("value").encode().decode("utf_8") + server = parsedin.getElementsByTagName("mail_server")[0].getAttribute("value").encode().decode("utf_8") + subj = parsedin.getElementsByTagName("subject")[0].getAttribute("value").encode().decode("utf_8") + except Exception: + email_enabled = False + # email_enabled = (to is not None) and (server is not None) and (subj is not None) + email_enabled = False + compile(errore, attachli, parsedin) + +class OS: + LINUX, MAC, WIN, CYGWIN = range(4) +if __name__ == "__main__": + global SYS + if _platform == "linux" or _platform == "linux2": + SYS = OS.LINUX + elif _platform == "darwin": + SYS = OS.MAC + elif _platform == "win32": + SYS = OS.WIN + elif _platform == "cygwin": + SYS = OS.CYGWIN + + if SYS is OS.WIN or SYS is OS.CYGWIN: + main() + else: + print("We only support Windows and Cygwin at this time.") diff --git a/thunderbirdparser/manifest.mf b/thunderbirdparser/manifest.mf index c16a2f4c010af5731db615bed9beb99b35fb9357..fc34c0e90a96e9bd70f0dbcf07f7727daeb23476 100644 --- a/thunderbirdparser/manifest.mf +++ b/thunderbirdparser/manifest.mf @@ -1,7 +1,7 @@ -Manifest-Version: 1.0 -AutoUpdate-Show-In-Client: true -OpenIDE-Module: org.sleuthkit.autopsy.thunderbirdparser/3 -OpenIDE-Module-Implementation-Version: 9 -OpenIDE-Module-Layer: org/sleuthkit/autopsy/thunderbirdparser/layer.xml -OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/thunderbirdparser/Bundle.properties - +Manifest-Version: 1.0 +AutoUpdate-Show-In-Client: true +OpenIDE-Module: org.sleuthkit.autopsy.thunderbirdparser/3 +OpenIDE-Module-Implementation-Version: 9 +OpenIDE-Module-Layer: org/sleuthkit/autopsy/thunderbirdparser/layer.xml +OpenIDE-Module-Localizing-Bundle: org/sleuthkit/autopsy/thunderbirdparser/Bundle.properties + diff --git a/thunderbirdparser/nbproject/project.properties b/thunderbirdparser/nbproject/project.properties index 6a243df466d3c116fba63b26f451d95dfde8d1d0..0735c621fa6302e729ccdc2e68290f83db78e747 100644 --- a/thunderbirdparser/nbproject/project.properties +++ b/thunderbirdparser/nbproject/project.properties @@ -1,6 +1,6 @@ -javac.source=1.7 -javac.compilerargs=-Xlint -Xlint:-serial -license.file=../LICENSE-2.0.txt -nbm.homepage=http://www.sleuthkit.org/autopsy/ -nbm.needs.restart=true -spec.version.base=1.2 +javac.source=1.7 +javac.compilerargs=-Xlint -Xlint:-serial +license.file=../LICENSE-2.0.txt +nbm.homepage=http://www.sleuthkit.org/autopsy/ +nbm.needs.restart=true +spec.version.base=1.2 diff --git a/update_versions.py b/update_versions.py index 2883021c9fbff30fe50c297bae91cca165b14f2e..fa228d0cca136427874ce527cef37fb43029d836 100644 --- a/update_versions.py +++ b/update_versions.py @@ -1,939 +1,939 @@ -# -# Autopsy Forensic Browser -# -# Copyright 2012-2013 Basis Technology Corp. 
-# Contact: carrier <at> sleuthkit <dot> org -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -####################### -# This script exists to help us determine update the library -# versions appropriately. See this page for version details. -# -# http://wiki.sleuthkit.org/index.php?title=Autopsy_3_Module_Versions -# -# The basic idea is that this script uses javadoc/jdiff to -# compare the current state of the source code to the last -# tag and identifies if APIs were removed, added, etc. -# -# When run from the Autopsy build script, this script will: -# - Clone Autopsy and checkout to the previous release tag -# as found in the NEWS.txt file -# - Auto-discover all modules and packages -# - Run jdiff, comparing the current and previous modules -# - Use jdiff's output to determine if each module -# a) has no changes -# b) has backwards compatible changes -# c) has backwards incompatible changes -# - Based off it's compatibility, updates each module's -# a) Major version -# b) Specification version -# c) Implementation version -# - Updates the dependencies on each module depending on the -# updated version numbers -# -# Optionally, when run from the command line, one can provide the -# desired tag to compare the current version to, the directory for -# the current version of Autopsy, and whether to automatically -# update the version numbers and dependencies. -# ------------------------------------------------------------ - -import errno -import os -import shutil -import stat -import subprocess -import sys -import traceback -from os import remove, close -from shutil import move -from tempfile import mkstemp -from xml.dom.minidom import parse, parseString - -# Jdiff return codes. 
Described in more detail further on -NO_CHANGES = 100 -COMPATIBLE = 101 -NON_COMPATIBLE = 102 -ERROR = 1 - -# An Autopsy module object -class Module: - # Initialize it with a name, return code, and version numbers - def __init__(self, name=None, ret=None, versions=None): - self.name = name - self.ret = ret - self.versions = versions - # As a string, the module should be it's name - def __str__(self): - return self.name - def __repr__(self): - return self.name - # When compared to another module, the two are equal if the names are the same - def __cmp__(self, other): - if isinstance(other, Module): - if self.name == other.name: - return 0 - elif self.name < other.name: - return -1 - else: - return 1 - return 1 - def __eq__(self, other): - if isinstance(other, Module): - if self.name == other.name: - return True - return False - def set_name(self, name): - self.name = name - def set_ret(self, ret): - self.ret = ret - def set_versions(self, versions): - self.versions = versions - def spec(self): - return self.versions[0] - def impl(self): - return self.versions[1] - def release(self): - return self.versions[2] - -# Representation of the Specification version number -class Spec: - # Initialize specification number, where num is a string like x.y - def __init__(self, num): - self.third = None - spec_nums = num.split(".") - if len(spec_nums) == 3: - final = spec_nums[2] - self.third = int(final) - - l, r = spec_nums[0], spec_nums[1] - - self.left = int(l) - self.right = int(r) - - def __str__(self): - return self.get() - def __cmp__(self, other): - if isinstance(other, Spec): - if self.left == other.left: - if self.right == other.right: - return 0 - if self.right < other.right: - return -1 - return 1 - if self.left < other.left: - return -1 - return 1 - elif isinstance(other, str): - l, r = other.split(".") - if self.left == int(l): - if self.right == int(r): - return 0 - if self.right < int(r): - return -1 - return 1 - if self.left < int(l): - return -1 - return 1 - return -1 - - def overflow(self): - return str(self.left + 1) + ".0" - def increment(self): - return str(self.left) + "." + str(self.right + 1) - def get(self): - spec_str = str(self.left) + "." + str(self.right) - if self.third is not None: - spec_str += "." + str(self.final) - return spec_str - def set(self, num): - if isinstance(num, str): - l, r = num.split(".") - self.left = int(l) - self.right = int(r) - elif isinstance(num, Spec): - self.left = num.left - self.right = num.right - return self - -# ================================ # -# Core Functions # -# ================================ # - -# Given a list of modules and the names for each version, compare -# the generated jdiff XML for each module and output the jdiff -# JavaDocs. 
-# -# modules: the list of all modules both versions have in common -# apiname_tag: the api name of the previous version, most likely the tag -# apiname_cur: the api name of the current version, most likely "Current" -# -# returns the exit code from the modified jdiff.jar -# return code 1 = error in jdiff -# return code 100 = no changes -# return code 101 = compatible changes -# return code 102 = incompatible changes -def compare_xml(module, apiname_tag, apiname_cur): - global docdir - make_dir(docdir) - null_file = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/lib/Null.java")) - jdiff = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/jdiff.jar")) - oldapi = fix_path("build/jdiff-xml/" + apiname_tag + "-" + module.name) - newapi = fix_path("build/jdiff-xml/" + apiname_cur + "-" + module.name) - docs = fix_path(docdir + "/" + module.name) - # Comments are strange. They look for a file with additional user comments in a - # directory like docs/user_comments_for_xyz. The problem being that xyz is the - # path to the new/old api. So xyz turns into multiple directories for us. - # i.e. user_comments_for_build/jdiff-xml/[tag name]-[module name]_to_build/jdiff-xml - comments = fix_path(docs + "/user_comments_for_build") - jdiff_com = fix_path(comments + "/jdiff-xml") - tag_comments = fix_path(jdiff_com + "/" + apiname_tag + "-" + module.name + "_to_build") - jdiff_tag_com = fix_path(tag_comments + "/jdiff-xml") - - if not os.path.exists(jdiff): - print("JDIFF doesn't exist.") - - make_dir(docs) - make_dir(comments) - make_dir(jdiff_com) - make_dir(tag_comments) - make_dir(jdiff_tag_com) - make_dir("jdiff-logs") - log = open("jdiff-logs/COMPARE-" + module.name + ".log", "w") - cmd = ["javadoc", - "-doclet", "jdiff.JDiff", - "-docletpath", jdiff, - "-d", docs, - "-oldapi", oldapi, - "-newapi", newapi, - "-script", - null_file] - jdiff = subprocess.Popen(cmd, stdout=log, stderr=log) - jdiff.wait() - log.close() - code = jdiff.returncode - print("Compared XML for " + module.name) - if code == NO_CHANGES: - print(" No API changes") - elif code == COMPATIBLE: - print(" API Changes are backwards compatible") - elif code == NON_COMPATIBLE: - print(" API Changes are not backwards compatible") - else: - print(" *Error in XML, most likely an empty module") - sys.stdout.flush() - return code - -# Generate the jdiff xml for the given module -# path: path to the autopsy source -# module: Module object -# name: api name for jdiff -def gen_xml(path, modules, name): - for module in modules: - # If its the regression test, the source is in the "test" dir - if module.name == "Testing": - src = os.path.join(path, module.name, "test", "qa-functional", "src") - else: - src = os.path.join(path, module.name, "src") - # xerces = os.path.abspath("./lib/xerces.jar") - xml_out = fix_path(os.path.abspath("./build/jdiff-xml/" + name + "-" + module.name)) - jdiff = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/jdiff.jar")) - make_dir("build/jdiff-xml") - make_dir("jdiff-logs") - log = open("jdiff-logs/GEN_XML-" + name + "-" + module.name + ".log", "w") - cmd = ["javadoc", - "-doclet", "jdiff.JDiff", - "-docletpath", jdiff, # ;" + xerces, <-- previous problems required this - "-apiname", xml_out, # leaving it in just in case it's needed once again - "-sourcepath", fix_path(src)] - cmd = cmd + get_packages(src) - jdiff = subprocess.Popen(cmd, stdout=log, stderr=log) - jdiff.wait() - log.close() - print("Generated XML for " + name + " " + module.name) - sys.stdout.flush() - -# Find all the modules in the 
given path -def find_modules(path): - modules = [] - # Step into each folder in the given path and - # see if it has manifest.mf - if so, it's a module - for dir in os.listdir(path): - directory = os.path.join(path, dir) - if os.path.isdir(directory): - for file in os.listdir(directory): - if file == "manifest.mf": - modules.append(Module(dir, None, None)) - return modules - -# Detects the differences between the source and tag modules -def module_diff(source_modules, tag_modules): - added_modules = [x for x in source_modules if x not in tag_modules] - removed_modules = [x for x in tag_modules if x not in source_modules] - similar_modules = [x for x in source_modules if x in tag_modules] - - added_modules = (added_modules if added_modules else []) - removed_modules = (removed_modules if removed_modules else []) - similar_modules = (similar_modules if similar_modules else []) - return similar_modules, added_modules, removed_modules - -# Reads the previous tag from NEWS.txt -def get_tag(sourcepath): - news = open(sourcepath + "/NEWS.txt", "r") - second_instance = False - for line in news: - if "----------------" in line: - if second_instance: - ver = line.split("VERSION ")[1] - ver = ver.split(" -")[0] - return ("autopsy-" + ver).strip() - else: - second_instance = True - continue - news.close() - - -# ========================================== # -# Dependency Functions # -# ========================================== # - -# Write a new XML file, copying all the lines from projectxml -# and replacing the specification version for the code-name-base base -# with the supplied specification version spec -def set_dep_spec(projectxml, base, spec): - print(" Updating Specification version..") - orig = open(projectxml, "r") - f, abs_path = mkstemp() - new_file = open(abs_path, "w") - found_base = False - spacing = " " - sopen = "<specification-version>" - sclose = "</specification-version>\n" - for line in orig: - if base in line: - found_base = True - if found_base and sopen in line: - update = spacing + sopen + str(spec) + sclose - new_file.write(update) - else: - new_file.write(line) - new_file.close() - close(f) - orig.close() - remove(projectxml) - move(abs_path, projectxml) - -# Write a new XML file, copying all the lines from projectxml -# and replacing the release version for the code-name-base base -# with the supplied release version -def set_dep_release(projectxml, base, release): - print(" Updating Release version..") - orig = open(projectxml, "r") - f, abs_path = mkstemp() - new_file = open(abs_path, "w") - found_base = False - spacing = " " - ropen = "<release-version>" - rclose = "</release-version>\n" - for line in orig: - if base in line: - found_base = True - if found_base and ropen in line: - update = spacing + ropen + str(release) + rclose - new_file.write(update) - else: - new_file.write(line) - new_file.close() - close(f) - orig.close() - remove(projectxml) - move(abs_path, projectxml) - -# Return the dependency versions in the XML dependency node -def get_dep_versions(dep): - run_dependency = dep.getElementsByTagName("run-dependency")[0] - release_version = run_dependency.getElementsByTagName("release-version") - if release_version: - release_version = getTagText(release_version[0].childNodes) - specification_version = run_dependency.getElementsByTagName("specification-version") - if specification_version: - specification_version = getTagText(specification_version[0].childNodes) - return int(release_version), Spec(specification_version) - -# Given a code-name-base, see if it 
corresponds with any of our modules -def get_module_from_base(modules, code_name_base): - for module in modules: - if "org.sleuthkit.autopsy." + module.name.lower() == code_name_base: - return module - return None # If it didn't match one of our modules - -# Check the text between two XML tags -def getTagText(nodelist): - for node in nodelist: - if node.nodeType == node.TEXT_NODE: - return node.data - -# Check the projectxml for a dependency on any module in modules -def check_for_dependencies(projectxml, modules): - dom = parse(projectxml) - dep_list = dom.getElementsByTagName("dependency") - for dep in dep_list: - code_name_base = dep.getElementsByTagName("code-name-base")[0] - code_name_base = getTagText(code_name_base.childNodes) - module = get_module_from_base(modules, code_name_base) - if module: - print(" Found dependency on " + module.name) - release, spec = get_dep_versions(dep) - if release != module.release() and module.release() is not None: - set_dep_release(projectxml, code_name_base, module.release()) - else: print(" Release version is correct") - if spec != module.spec() and module.spec() is not None: - set_dep_spec(projectxml, code_name_base, module.spec()) - else: print(" Specification version is correct") - -# Given the module and the source directory, return -# the paths to the manifest and project properties files -def get_dependency_file(module, source): - projectxml = os.path.join(source, module.name, "nbproject", "project.xml") - if os.path.isfile(projectxml): - return projectxml - -# Verify/Update the dependencies for each module, basing the dependency -# version number off the versions in each module -def update_dependencies(modules, source): - for module in modules: - print("Checking the dependencies for " + module.name + "...") - projectxml = get_dependency_file(module, source) - if projectxml == None: - print(" Error finding project xml file") - else: - other = [x for x in modules] - check_for_dependencies(projectxml, other) - sys.stdout.flush() - -# ======================================== # -# Versioning Functions # -# ======================================== # - -# Return the specification version in the given project.properties/manifest.mf file -def get_specification(project, manifest): - try: - # Try to find it in the project file - # it will be there if impl version is set to append automatically - f = open(project, 'r') - for line in f: - if "spec.version.base" in line: - return Spec(line.split("=")[1].strip()) - f.close() - # If not found there, try the manifest file - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module-Specification-Version:" in line: - return Spec(line.split(": ")[1].strip()) - except Exception as e: - print("Error parsing Specification version for") - print(project) - print(e) - -# Set the specification version in the given project properties file -# but if it can't be found there, set it in the manifest file -def set_specification(project, manifest, num): - try: - # First try the project file - f = open(project, 'r') - for line in f: - if "spec.version.base" in line: - f.close() - replace(project, line, "spec.version.base=" + str(num) + "\n") - return - f.close() - # If it's not there, try the manifest file - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module-Specification-Version:" in line: - f.close() - replace(manifest, line, "OpenIDE-Module-Specification-Version: " + str(num) + "\n") - return - # Otherwise we're out of luck - print(" Error finding the Specification version to update") - print(" " + manifest) 
- f.close() - except: - print(" Error incrementing Specification version for") - print(" " + project) - -# Return the implementation version in the given manifest.mf file -def get_implementation(manifest): - try: - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module-Implementation-Version" in line: - return int(line.split(": ")[1].strip()) - f.close() - except: - print("Error parsing Implementation version for") - print(manifest) - -# Set the implementation version in the given manifest file -def set_implementation(manifest, num): - try: - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module-Implementation-Version" in line: - f.close() - replace(manifest, line, "OpenIDE-Module-Implementation-Version: " + str(num) + "\n") - return - # If it isn't there, add it - f.close() - write_implementation(manifest, num) - except: - print(" Error incrementing Implementation version for") - print(" " + manifest) - -# Rewrite the manifest file to include the implementation version -def write_implementation(manifest, num): - f = open(manifest, "r") - contents = f.read() - contents = contents[:-2] + "OpenIDE-Module-Implementation-Version: " + str(num) + "\n\n" - f.close() - f = open(manifest, "w") - f.write(contents) - f.close() - -# Return the release version in the given manifest.mf file -def get_release(manifest): - try: - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module:" in line: - return int(line.split("/")[1].strip()) - f.close() - except: - #print("Error parsing Release version for") - #print(manifest) - return 0 - -# Set the release version in the given manifest file -def set_release(manifest, num): - try: - f = open(manifest, 'r') - for line in f: - if "OpenIDE-Module:" in line: - f.close() - index = line.index('/') - len(line) + 1 - newline = line[:index] + str(num) - replace(manifest, line, newline + "\n") - return - print(" Error finding the release version to update") - print(" " + manifest) - f.close() - except: - print(" Error incrementing release version for") - print(" " + manifest) - -# Given the module and the source directory, return -# the paths to the manifest and project properties files -def get_version_files(module, source): - manifest = os.path.join(source, module.name, "manifest.mf") - project = os.path.join(source, module.name, "nbproject", "project.properties") - if os.path.isfile(manifest) and os.path.isfile(project): - return manifest, project - -# Returns a the current version numbers for the module in source -def get_versions(module, source): - manifest, project = get_version_files(module, source) - if manifest == None or project == None: - print(" Error finding manifeset and project properties files") - return - spec = get_specification(project, manifest) - impl = get_implementation(manifest) - release = get_release(manifest) - return [spec, impl, release] - -# Update the version numbers for every module in modules -def update_versions(modules, source): - for module in modules: - versions = module.versions - manifest, project = get_version_files(module, source) - print("Updating " + module.name + "...") - if manifest == None or project == None: - print(" Error finding manifeset and project properties files") - return - if module.ret == COMPATIBLE: - versions = [versions[0].set(versions[0].increment()), versions[1] + 1, versions[2]] - set_specification(project, manifest, versions[0]) - set_implementation(manifest, versions[1]) - module.set_versions(versions) - elif module.ret == NON_COMPATIBLE: - versions = 
[versions[0].set(versions[0].overflow()), versions[1] + 1, versions[2] + 1] - set_specification(project, manifest, versions[0]) - set_implementation(manifest, versions[1]) - set_release(manifest, versions[2]) - module.set_versions(versions) - elif module.ret == NO_CHANGES: - versions = [versions[0], versions[1] + 1, versions[2]] - set_implementation(manifest, versions[1]) - module.set_versions(versions) - elif module.ret == None: - versions = [Spec("1.0"), 1, 1] - set_specification(project, manifest, versions[0]) - set_implementation(manifest, versions[1]) - set_release(manifest, versions[2]) - module.set_versions(versions) - sys.stdout.flush() - -# Given a list of the added modules, remove the modules -# which have the correct 'new module default' version number -def remove_correct_added(modules): - correct = [x for x in modules] - for module in modules: - if module.spec() == "1.0" or module.spec() == "0.0": - if module.impl() == 1: - if module.release() == 1 or module.release() == 0: - correct.remove(module) - return correct - -# ==================================== # -# Helper Functions # -# ==================================== # - -# Replace pattern with subst in given file -def replace(file, pattern, subst): - #Create temp file - fh, abs_path = mkstemp() - new_file = open(abs_path,'w') - old_file = open(file) - for line in old_file: - new_file.write(line.replace(pattern, subst)) - #close temp file - new_file.close() - close(fh) - old_file.close() - #Remove original file - remove(file) - #Move new file - move(abs_path, file) - -# Given a list of modules print the version numbers that need changing -def print_version_updates(modules): - f = open("gen_version.txt", "a") - for module in modules: - versions = module.versions - if module.ret == COMPATIBLE: - output = (module.name + ":\n") - output += ("\tSpecification:\t" + str(versions[0]) + "\t->\t" + str(versions[0].increment()) + "\n") - output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n") - output += ("\tRelease:\tNo Change.\n") - output += ("\n") - print(output) - sys.stdout.flush() - f.write(output) - elif module.ret == NON_COMPATIBLE: - output = (module.name + ":\n") - output += ("\tSpecification:\t" + str(versions[0]) + "\t->\t" + str(versions[0].overflow()) + "\n") - output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n") - output += ("\tRelease:\t" + str(versions[2]) + "\t->\t" + str(versions[2] + 1) + "\n") - output += ("\n") - print(output) - sys.stdout.flush() - f.write(output) - elif module.ret == ERROR: - output = (module.name + ":\n") - output += ("\t*Unable to detect necessary changes\n") - output += ("\tSpecification:\t" + str(versions[0]) + "\n") - output += ("\tImplementation:\t" + str(versions[1]) + "\n") - output += ("\tRelease:\t\t" + str(versions[2]) + "\n") - output += ("\n") - print(output) - f.write(output) - sys.stdout.flush() - elif module.ret == NO_CHANGES: - output = (module.name + ":\n") - if versions[1] is None: - output += ("\tImplementation: None\n") - else: - output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n") - output += ("\n") - print(output) - sys.stdout.flush() - f.write(output) - elif module.ret is None: - output = ("Added " + module.name + ":\n") - if module.spec() != "1.0" and module.spec() != "0.0": - output += ("\tSpecification:\t" + str(module.spec()) + "\t->\t" + "1.0\n") - output += ("\n") - if module.impl() != 1: - output += ("\tImplementation:\t" + str(module.impl()) + 
"\t->\t" + "1\n") - output += ("\n") - if module.release() != 1 and module.release() != 0: - output += ("Release:\t\t" + str(module.release()) + "\t->\t" + "1\n") - output += ("\n") - print(output) - sys.stdout.flush() - f.write(output) - sys.stdout.flush() - f.close() - -# Changes cygwin paths to Windows -def fix_path(path): - if "cygdrive" in path: - new_path = path[11:] - return "C:/" + new_path - else: - return path - -# Print a 'title' -def printt(title): - print("\n" + title) - lines = "" - for letter in title: - lines += "-" - print(lines) - sys.stdout.flush() - -# Get a list of package names in the given path -# The path is expected to be of the form {base}/module/src -# -# NOTE: We currently only check for packages of the form -# org.sleuthkit.autopsy.x -# If we add other namespaces for commercial modules we will -# have to add a check here -def get_packages(path): - packages = [] - package_path = os.path.join(path, "org", "sleuthkit", "autopsy") - for folder in os.listdir(package_path): - package_string = "org.sleuthkit.autopsy." - packages.append(package_string + folder) - return packages - -# Create the given directory, if it doesn't already exist -def make_dir(dir): - try: - if not os.path.isdir(dir): - os.mkdir(dir) - if os.path.isdir(dir): - return True - return False - except: - print("Exception thrown when creating directory") - return False - -# Delete the given directory, and make sure it is deleted -def del_dir(dir): - try: - if os.path.isdir(dir): - shutil.rmtree(dir, ignore_errors=False, onerror=handleRemoveReadonly) - if os.path.isdir(dir): - return False - else: - return True - return True - except: - print("Exception thrown when deleting directory") - traceback.print_exc() - return False - -# Handle any permisson errors thrown by shutil.rmtree -def handleRemoveReadonly(func, path, exc): - excvalue = exc[1] - if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES: - os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777 - func(path) - else: - raise - -# Run git clone and git checkout for the tag -def do_git(tag, tag_dir): - try: - printt("Cloning Autopsy tag " + tag + " into dir " + tag_dir + " (this could take a while)...") - subprocess.call(["git", "clone", "https://github.com/sleuthkit/autopsy.git", tag_dir], - stdout=subprocess.PIPE) - printt("Checking out tag " + tag + "...") - subprocess.call(["git", "checkout", tag], - stdout=subprocess.PIPE, - cwd=tag_dir) - return True - except Exception as ex: - print("Error cloning and checking out Autopsy: ", sys.exc_info()[0]) - print(str(ex)) - print("The terminal you are using most likely does not recognize git commands.") - return False - -# Get the flags from argv -def args(): - try: - sys.argv.pop(0) - while sys.argv: - arg = sys.argv.pop(0) - if arg == "-h" or arg == "--help": - return 1 - elif arg == "-t" or arg == "--tag": - global tag - tag = sys.argv.pop(0) - elif arg == "-s" or arg == "--source": - global source - source = sys.argv.pop(0) - elif arg == "-d" or arg == "--dir": - global docdir - docdir = sys.argv.pop(0) - elif arg == "-a" or arg == "--auto": - global dry - dry = False - else: - raise Exception() - except: - pass - -# Print script run info -def printinfo(): - global tag - global source - global docdir - global dry - printt("Release script information:") - if source is None: - source = fix_path(os.path.abspath(".")) - print("Using source directory:\n " + source) - if tag is None: - tag = get_tag(source) - print("Checking out to tag:\n " + tag) - if docdir is None: - docdir = 
fix_path(os.path.abspath("./jdiff-javadocs"))
-    print("Generating jdiff JavaDocs in:\n " + docdir)
-    if dry is True:
-        print("Dry run: will not auto-update version numbers")
-    sys.stdout.flush()
-
-# Print the script's usage/help
-def usage():
-    return \
-    """
-    USAGE:
-      Compares the API of the current Autopsy source code with a previous
-      tagged version. By default, it will detect the previous tag from
-      the NEWS file and will not update the versions in the source code.
-
-    OPTIONAL FLAGS:
-      -t --tag      Specify a previous tag to compare to.
-                    Otherwise the NEWS file will be used.
-
-      -d --dir      The output directory for the jdiff JavaDocs. If no
-                    directory is given, the default is jdiff-javadocs/{module}.
-
-      -s --source   The directory containing Autopsy's source code.
-
-      -a --auto     Automatically update version numbers (not dry).
-
-      -h --help     Prints this usage.
-    """
-
-# ==================================== #
-#          Main Functionality          #
-# ==================================== #
-
-# Where the magic happens
-def main():
-    global tag; global source; global docdir; global dry
-    tag = None; source = None; docdir = None; dry = True
-
-    ret = args()
-    if ret:
-        print(usage())
-        return 0
-    printinfo()
-
-    # -----------------------------------------------
-    # 1) Clone Autopsy, checkout to given tag/commit
-    # 2) Get the modules in the clone and the source
-    # 3) Generate the xml comparison
-    # -----------------------------------------------
-    if not del_dir("./build/" + tag):
-        print("\n\n=========================================")
-        print(" Failed to delete previous Autopsy clone.")
-        print(" Unable to continue...")
-        print("=========================================")
-        return 1
-    tag_dir = os.path.abspath("./build/" + tag)
-    if not do_git(tag, tag_dir):
-        return 1
-    sys.stdout.flush()
-
-    tag_modules = find_modules(tag_dir)
-    source_modules = find_modules(source)
-
-    printt("Generating jdiff XML reports...")
-    apiname_tag = tag
-    apiname_cur = "current"
-    gen_xml(tag_dir, tag_modules, apiname_tag)
-    gen_xml(source, source_modules, apiname_cur)
-
-    printt("Deleting cloned Autopsy directory...")
-    print("Clone successfully deleted" if del_dir(tag_dir) else "Failed to delete clone")
-    sys.stdout.flush()
-
-    # -----------------------------------------------------
-    # 1) Seperate modules into added, similar, and removed
-    # 2) Compare XML for each module
-    # -----------------------------------------------------
-    printt("Comparing modules found...")
-    similar_modules, added_modules, removed_modules = module_diff(source_modules, tag_modules)
-    if added_modules or removed_modules:
-        for m in added_modules:
-            print("+ Added " + m.name)
-            sys.stdout.flush()
-        for m in removed_modules:
-            print("- Removed " + m.name)
-            sys.stdout.flush()
-    else:
-        print("No added or removed modules")
-        sys.stdout.flush()
-
-    printt("Comparing jdiff outputs...")
-    for module in similar_modules:
-        module.set_ret(compare_xml(module, apiname_tag, apiname_cur))
-    print("Refer to the jdiff-javadocs folder for more details")
-
-    # ------------------------------------------------------------
-    # 1) Do versioning
-    # 2) Auto-update version numbers in files and the_modules list
-    # 3) Auto-update dependencies
-    # ------------------------------------------------------------
-    printt("Auto-detecting version numbers and changes...")
-    for module in added_modules:
-        module.set_versions(get_versions(module, source))
-    for module in similar_modules:
-        module.set_versions(get_versions(module, source))
-
-    added_modules = remove_correct_added(added_modules)
-    the_modules = similar_modules + added_modules
-    print_version_updates(the_modules)
-
-    if not dry:
-        printt("Auto-updating version numbers...")
-        update_versions(the_modules, source)
-        print("All auto-updates complete")
-
-        printt("Detecting and auto-updating dependencies...")
-        update_dependencies(the_modules, source)
-
-    printt("Deleting jdiff XML...")
-    xml_dir = os.path.abspath("./build/jdiff-xml")
-    print("XML successfully deleted" if del_dir(xml_dir) else "Failed to delete XML")
-
-    print("\n--- Script completed successfully ---")
-    return 0
-
-# Start off the script
-if __name__ == "__main__":
-    sys.exit(main())
+#
+# Autopsy Forensic Browser
+#
+# Copyright 2012-2013 Basis Technology Corp.
+# Contact: carrier <at> sleuthkit <dot> org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+#######################
+# This script exists to help us determine how to update the library
+# versions appropriately. See this page for version details.
+#
+# http://wiki.sleuthkit.org/index.php?title=Autopsy_3_Module_Versions
+#
+# The basic idea is that this script uses javadoc/jdiff to
+# compare the current state of the source code to the last
+# tag and identify whether APIs were removed, added, etc.
+#
+# When run from the Autopsy build script, this script will:
+# - Clone Autopsy and check out the previous release tag
+#   as found in the NEWS.txt file
+# - Auto-discover all modules and packages
+# - Run jdiff, comparing the current and previous modules
+# - Use jdiff's output to determine if each module
+#     a) has no changes
+#     b) has backwards compatible changes
+#     c) has backwards incompatible changes
+# - Based on its compatibility, updates each module's
+#     a) Major version
+#     b) Specification version
+#     c) Implementation version
+# - Updates the dependencies on each module depending on the
+#   updated version numbers
+#
+# Optionally, when run from the command line, one can provide the
+# desired tag to compare the current version to, the directory for
+# the current version of Autopsy, and whether to automatically
+# update the version numbers and dependencies.
+# ------------------------------------------------------------
+
+import errno
+import os
+import shutil
+import stat
+import subprocess
+import sys
+import traceback
+from os import remove, close
+from shutil import move
+from tempfile import mkstemp
+from xml.dom.minidom import parse, parseString
+
+# Jdiff return codes. Described in more detail further on
+NO_CHANGES = 100
+COMPATIBLE = 101
+NON_COMPATIBLE = 102
+ERROR = 1
+
+# An Autopsy module object
+class Module:
+    # Initialize it with a name, return code, and version numbers
+    def __init__(self, name=None, ret=None, versions=None):
+        self.name = name
+        self.ret = ret
+        self.versions = versions
+    # As a string, the module should be its name
+    def __str__(self):
+        return self.name
+    def __repr__(self):
+        return self.name
+    # When compared to another module, the two are equal if the names are the same
+    def __cmp__(self, other):
+        if isinstance(other, Module):
+            if self.name == other.name:
+                return 0
+            elif self.name < other.name:
+                return -1
+            else:
+                return 1
+        return 1
+    def __eq__(self, other):
+        if isinstance(other, Module):
+            if self.name == other.name:
+                return True
+        return False
+    def set_name(self, name):
+        self.name = name
+    def set_ret(self, ret):
+        self.ret = ret
+    def set_versions(self, versions):
+        self.versions = versions
+    def spec(self):
+        return self.versions[0]
+    def impl(self):
+        return self.versions[1]
+    def release(self):
+        return self.versions[2]
+
+# Representation of the Specification version number
+class Spec:
+    # Initialize specification number, where num is a string like x.y
+    def __init__(self, num):
+        self.third = None
+        spec_nums = num.split(".")
+        if len(spec_nums) == 3:
+            final = spec_nums[2]
+            self.third = int(final)
+
+        l, r = spec_nums[0], spec_nums[1]
+
+        self.left = int(l)
+        self.right = int(r)
+
+    def __str__(self):
+        return self.get()
+    def __cmp__(self, other):
+        if isinstance(other, Spec):
+            if self.left == other.left:
+                if self.right == other.right:
+                    return 0
+                if self.right < other.right:
+                    return -1
+                return 1
+            if self.left < other.left:
+                return -1
+            return 1
+        elif isinstance(other, str):
+            l, r = other.split(".")
+            if self.left == int(l):
+                if self.right == int(r):
+                    return 0
+                if self.right < int(r):
+                    return -1
+                return 1
+            if self.left < int(l):
+                return -1
+            return 1
+        return -1
+
+    def overflow(self):
+        return str(self.left + 1) + ".0"
+    def increment(self):
+        return str(self.left) + "." + str(self.right + 1)
+    def get(self):
+        spec_str = str(self.left) + "." + str(self.right)
+        if self.third is not None:
+            spec_str += "." + str(self.third)
+        return spec_str
+    def set(self, num):
+        if isinstance(num, str):
+            l, r = num.split(".")
+            self.left = int(l)
+            self.right = int(r)
+        elif isinstance(num, Spec):
+            self.left = num.left
+            self.right = num.right
+        return self
+
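+# Editorial note: __cmp__ above is a Python 2 protocol that Python 3 ignores;
+# under Python 3, == and != on Spec fall back to identity and < raises
+# TypeError. A minimal workaround (hypothetical helper, unused by this
+# script) is an explicit sort/equality key:
+def _spec_key(spec):
+    # Orders and compares Spec objects by their numeric components, e.g.
+    # _spec_key(a) == _spec_key(b), or sorted(specs, key=_spec_key).
+    return (spec.left, spec.right)
+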
+# ================================ #
+#          Core Functions          #
+# ================================ #
+
+# Given a list of modules and the names for each version, compare
+# the generated jdiff XML for each module and output the jdiff
+# JavaDocs.
+#
+# modules: the list of all modules both versions have in common
+# apiname_tag: the api name of the previous version, most likely the tag
+# apiname_cur: the api name of the current version, most likely "Current"
+#
+# returns the exit code from the modified jdiff.jar
+# return code 1   = error in jdiff
+# return code 100 = no changes
+# return code 101 = compatible changes
+# return code 102 = incompatible changes
+def compare_xml(module, apiname_tag, apiname_cur):
+    global docdir
+    make_dir(docdir)
+    null_file = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/lib/Null.java"))
+    jdiff = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/jdiff.jar"))
+    oldapi = fix_path("build/jdiff-xml/" + apiname_tag + "-" + module.name)
+    newapi = fix_path("build/jdiff-xml/" + apiname_cur + "-" + module.name)
+    docs = fix_path(docdir + "/" + module.name)
+    # Comments are strange. They look for a file with additional user comments in a
+    # directory like docs/user_comments_for_xyz. The problem being that xyz is the
+    # path to the new/old api. So xyz turns into multiple directories for us.
+    # i.e. user_comments_for_build/jdiff-xml/[tag name]-[module name]_to_build/jdiff-xml
+    comments = fix_path(docs + "/user_comments_for_build")
+    jdiff_com = fix_path(comments + "/jdiff-xml")
+    tag_comments = fix_path(jdiff_com + "/" + apiname_tag + "-" + module.name + "_to_build")
+    jdiff_tag_com = fix_path(tag_comments + "/jdiff-xml")
+
+    if not os.path.exists(jdiff):
+        print("JDIFF doesn't exist.")
+
+    make_dir(docs)
+    make_dir(comments)
+    make_dir(jdiff_com)
+    make_dir(tag_comments)
+    make_dir(jdiff_tag_com)
+    make_dir("jdiff-logs")
+    log = open("jdiff-logs/COMPARE-" + module.name + ".log", "w")
+    cmd = ["javadoc",
+           "-doclet", "jdiff.JDiff",
+           "-docletpath", jdiff,
+           "-d", docs,
+           "-oldapi", oldapi,
+           "-newapi", newapi,
+           "-script",
+           null_file]
+    jdiff = subprocess.Popen(cmd, stdout=log, stderr=log)
+    jdiff.wait()
+    log.close()
+    code = jdiff.returncode
+    print("Compared XML for " + module.name)
+    if code == NO_CHANGES:
+        print(" No API changes")
+    elif code == COMPATIBLE:
+        print(" API Changes are backwards compatible")
+    elif code == NON_COMPATIBLE:
+        print(" API Changes are not backwards compatible")
+    else:
+        print(" *Error in XML, most likely an empty module")
+    sys.stdout.flush()
+    return code
+
+# Generate the jdiff xml for the given module
+# path: path to the autopsy source
+# modules: the list of Module objects to generate XML for
+# name: api name for jdiff
+def gen_xml(path, modules, name):
+    for module in modules:
+        # If it's the regression test, the source is in the "test" dir
+        if module.name == "Testing":
+            src = os.path.join(path, module.name, "test", "qa-functional", "src")
+        else:
+            src = os.path.join(path, module.name, "src")
+        # xerces = os.path.abspath("./lib/xerces.jar")
+        xml_out = fix_path(os.path.abspath("./build/jdiff-xml/" + name + "-" + module.name))
+        jdiff = fix_path(os.path.abspath("./thirdparty/jdiff/v-custom/jdiff.jar"))
+        make_dir("build/jdiff-xml")
+        make_dir("jdiff-logs")
+        log = open("jdiff-logs/GEN_XML-" + name + "-" + module.name + ".log", "w")
+        cmd = ["javadoc",
+               "-doclet", "jdiff.JDiff",
+               "-docletpath", jdiff,  # ;" + xerces, <-- previous problems required this
+               "-apiname", xml_out,   # leaving it in just in case it's needed once again
+               "-sourcepath", fix_path(src)]
+        cmd = cmd + get_packages(src)
+        jdiff = subprocess.Popen(cmd, stdout=log, stderr=log)
+        jdiff.wait()
+        log.close()
+        print("Generated XML for " + name + " " + module.name)
+        sys.stdout.flush()
+
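+# Editorial sketch: how the jdiff exit codes from compare_xml() map onto the
+# version bumps applied later by update_versions() (hypothetical helper,
+# unused by this script).
+def _describe_jdiff_code(code):
+    actions = {NO_CHANGES: "bump implementation only",
+               COMPATIBLE: "bump specification and implementation",
+               NON_COMPATIBLE: "bump release, specification, and implementation"}
+    return actions.get(code, "error, most likely an empty module")
+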
given path +def find_modules(path): + modules = [] + # Step into each folder in the given path and + # see if it has manifest.mf - if so, it's a module + for dir in os.listdir(path): + directory = os.path.join(path, dir) + if os.path.isdir(directory): + for file in os.listdir(directory): + if file == "manifest.mf": + modules.append(Module(dir, None, None)) + return modules + +# Detects the differences between the source and tag modules +def module_diff(source_modules, tag_modules): + added_modules = [x for x in source_modules if x not in tag_modules] + removed_modules = [x for x in tag_modules if x not in source_modules] + similar_modules = [x for x in source_modules if x in tag_modules] + + added_modules = (added_modules if added_modules else []) + removed_modules = (removed_modules if removed_modules else []) + similar_modules = (similar_modules if similar_modules else []) + return similar_modules, added_modules, removed_modules + +# Reads the previous tag from NEWS.txt +def get_tag(sourcepath): + news = open(sourcepath + "/NEWS.txt", "r") + second_instance = False + for line in news: + if "----------------" in line: + if second_instance: + ver = line.split("VERSION ")[1] + ver = ver.split(" -")[0] + return ("autopsy-" + ver).strip() + else: + second_instance = True + continue + news.close() + + +# ========================================== # +# Dependency Functions # +# ========================================== # + +# Write a new XML file, copying all the lines from projectxml +# and replacing the specification version for the code-name-base base +# with the supplied specification version spec +def set_dep_spec(projectxml, base, spec): + print(" Updating Specification version..") + orig = open(projectxml, "r") + f, abs_path = mkstemp() + new_file = open(abs_path, "w") + found_base = False + spacing = " " + sopen = "<specification-version>" + sclose = "</specification-version>\n" + for line in orig: + if base in line: + found_base = True + if found_base and sopen in line: + update = spacing + sopen + str(spec) + sclose + new_file.write(update) + else: + new_file.write(line) + new_file.close() + close(f) + orig.close() + remove(projectxml) + move(abs_path, projectxml) + +# Write a new XML file, copying all the lines from projectxml +# and replacing the release version for the code-name-base base +# with the supplied release version +def set_dep_release(projectxml, base, release): + print(" Updating Release version..") + orig = open(projectxml, "r") + f, abs_path = mkstemp() + new_file = open(abs_path, "w") + found_base = False + spacing = " " + ropen = "<release-version>" + rclose = "</release-version>\n" + for line in orig: + if base in line: + found_base = True + if found_base and ropen in line: + update = spacing + ropen + str(release) + rclose + new_file.write(update) + else: + new_file.write(line) + new_file.close() + close(f) + orig.close() + remove(projectxml) + move(abs_path, projectxml) + +# Return the dependency versions in the XML dependency node +def get_dep_versions(dep): + run_dependency = dep.getElementsByTagName("run-dependency")[0] + release_version = run_dependency.getElementsByTagName("release-version") + if release_version: + release_version = getTagText(release_version[0].childNodes) + specification_version = run_dependency.getElementsByTagName("specification-version") + if specification_version: + specification_version = getTagText(specification_version[0].childNodes) + return int(release_version), Spec(specification_version) + +# Given a code-name-base, see if it 
+# Given a code-name-base, see if it corresponds with any of our modules
+def get_module_from_base(modules, code_name_base):
+    for module in modules:
+        if "org.sleuthkit.autopsy." + module.name.lower() == code_name_base:
+            return module
+    return None   # It didn't match one of our modules
+
+# Return the text of the first text node in the given XML node list
+def getTagText(nodelist):
+    for node in nodelist:
+        if node.nodeType == node.TEXT_NODE:
+            return node.data
+
+# Check the projectxml for a dependency on any module in modules
+def check_for_dependencies(projectxml, modules):
+    dom = parse(projectxml)
+    dep_list = dom.getElementsByTagName("dependency")
+    for dep in dep_list:
+        code_name_base = dep.getElementsByTagName("code-name-base")[0]
+        code_name_base = getTagText(code_name_base.childNodes)
+        module = get_module_from_base(modules, code_name_base)
+        if module:
+            print("  Found dependency on " + module.name)
+            release, spec = get_dep_versions(dep)
+            if release != module.release() and module.release() is not None:
+                set_dep_release(projectxml, code_name_base, module.release())
+            else:
+                print("  Release version is correct")
+            if spec != module.spec() and module.spec() is not None:
+                set_dep_spec(projectxml, code_name_base, module.spec())
+            else:
+                print("  Specification version is correct")
+
+# Given the module and the source directory, return
+# the path to the module's project.xml file, if it exists
+def get_dependency_file(module, source):
+    projectxml = os.path.join(source, module.name, "nbproject", "project.xml")
+    if os.path.isfile(projectxml):
+        return projectxml
+
+# Verify/update the dependencies for each module, basing the dependency
+# version numbers off the versions in each module
+def update_dependencies(modules, source):
+    for module in modules:
+        print("Checking the dependencies for " + module.name + "...")
+        projectxml = get_dependency_file(module, source)
+        if projectxml is None:
+            print("  Error finding project xml file")
+        else:
+            other = [x for x in modules]
+            check_for_dependencies(projectxml, other)
+        sys.stdout.flush()
+
+# ======================================== #
+#          Versioning Functions            #
+# ======================================== #
+
+# Return the specification version in the given project.properties/manifest.mf file
+def get_specification(project, manifest):
+    try:
+        # Try to find it in the project file;
+        # it will be there if the impl version is set to append automatically
+        f = open(project, 'r')
+        for line in f:
+            if "spec.version.base" in line:
+                f.close()
+                return Spec(line.split("=")[1].strip())
+        f.close()
+        # If not found there, try the manifest file
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module-Specification-Version:" in line:
+                f.close()
+                return Spec(line.split(": ")[1].strip())
+        f.close()
+    except Exception as e:
+        print("Error parsing Specification version for")
+        print(project)
+        print(e)
+
+# Set the specification version in the given project properties file,
+# but if it can't be found there, set it in the manifest file
+def set_specification(project, manifest, num):
+    try:
+        # First try the project file
+        f = open(project, 'r')
+        for line in f:
+            if "spec.version.base" in line:
+                f.close()
+                replace(project, line, "spec.version.base=" + str(num) + "\n")
+                return
+        f.close()
+        # If it's not there, try the manifest file
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module-Specification-Version:" in line:
+                f.close()
+                replace(manifest, line, "OpenIDE-Module-Specification-Version: " + str(num) + "\n")
+                return
+        f.close()
+        # Otherwise we're out of luck
+        print("  Error finding the Specification version to update")
+        print("  " + manifest)
+    except:
+        print("  Error incrementing Specification version for")
+        print("  " + project)
+
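+# Minimal sketch (illustrative, never called): the two formats get_specification
+# understands, exercised against throwaway files. The file names are arbitrary.
+def _example_get_specification():
+    import tempfile
+    proj = tempfile.NamedTemporaryFile("w", suffix=".properties", delete=False)
+    proj.write("spec.version.base=7.0\n")
+    proj.close()
+    man = tempfile.NamedTemporaryFile("w", suffix=".mf", delete=False)
+    man.write("OpenIDE-Module-Specification-Version: 7.0\n")
+    man.close()
+    try:
+        return get_specification(proj.name, man.name)   # expected: Spec("7.0")
+    finally:
+        os.remove(proj.name)
+        os.remove(man.name)
+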
+# Return the implementation version in the given manifest.mf file
+def get_implementation(manifest):
+    try:
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module-Implementation-Version" in line:
+                f.close()
+                return int(line.split(": ")[1].strip())
+        f.close()
+    except:
+        print("Error parsing Implementation version for")
+        print(manifest)
+
+# Set the implementation version in the given manifest file
+def set_implementation(manifest, num):
+    try:
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module-Implementation-Version" in line:
+                f.close()
+                replace(manifest, line, "OpenIDE-Module-Implementation-Version: " + str(num) + "\n")
+                return
+        # If it isn't there, add it
+        f.close()
+        write_implementation(manifest, num)
+    except:
+        print("  Error incrementing Implementation version for")
+        print("  " + manifest)
+
+# Rewrite the manifest file to include the implementation version
+def write_implementation(manifest, num):
+    f = open(manifest, "r")
+    contents = f.read()
+    # Assumes the manifest ends with a blank line: drop the trailing newlines,
+    # append the new attribute, and restore them
+    contents = contents[:-2] + "OpenIDE-Module-Implementation-Version: " + str(num) + "\n\n"
+    f.close()
+    f = open(manifest, "w")
+    f.write(contents)
+    f.close()
+
+# Return the release version in the given manifest.mf file
+def get_release(manifest):
+    try:
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module:" in line:
+                f.close()
+                return int(line.split("/")[1].strip())
+        f.close()
+        # No OpenIDE-Module line found; treat as release 0
+        return 0
+    except:
+        # No release number in the OpenIDE-Module line; treat as release 0
+        return 0
+
+# Set the release version in the given manifest file
+def set_release(manifest, num):
+    try:
+        f = open(manifest, 'r')
+        for line in f:
+            if "OpenIDE-Module:" in line:
+                f.close()
+                # Keep everything up to and including the '/', then append the new number
+                index = line.index('/') - len(line) + 1
+                newline = line[:index] + str(num)
+                replace(manifest, line, newline + "\n")
+                return
+        f.close()
+        print("  Error finding the release version to update")
+        print("  " + manifest)
+    except:
+        print("  Error incrementing release version for")
+        print("  " + manifest)
+
+# Given the module and the source directory, return
+# the paths to the manifest and project properties files
+def get_version_files(module, source):
+    manifest = os.path.join(source, module.name, "manifest.mf")
+    project = os.path.join(source, module.name, "nbproject", "project.properties")
+    if os.path.isfile(manifest) and os.path.isfile(project):
+        return manifest, project
+
+# Return the current version numbers for the module in source
+def get_versions(module, source):
+    files = get_version_files(module, source)
+    # get_version_files returns None if either file is missing, so check
+    # before unpacking (unpacking None would raise a TypeError)
+    if files is None:
+        print("  Error finding manifest and project properties files")
+        return
+    manifest, project = files
+    spec = get_specification(project, manifest)
+    impl = get_implementation(manifest)
+    release = get_release(manifest)
+    return [spec, impl, release]
+
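+# Illustrative summary (never called) of the bump rules update_versions applies
+# below. Assumes Spec.increment()/Spec.overflow() return the bumped version,
+# as their use in update_versions and print_version_updates suggests.
+def _example_bump(spec, impl, release, ret):
+    if ret == COMPATIBLE:         # e.g. 6.5 -> 6.6, impl +1
+        return [spec.increment(), impl + 1, release]
+    elif ret == NON_COMPATIBLE:   # e.g. 6.5 -> 7.0, impl +1, release +1
+        return [spec.overflow(), impl + 1, release + 1]
+    elif ret == NO_CHANGES:       # impl +1 only
+        return [spec, impl + 1, release]
+    else:                         # new module: reset to the defaults
+        return [Spec("1.0"), 1, 1]
+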
+# Update the version numbers for every module in modules
+def update_versions(modules, source):
+    for module in modules:
+        versions = module.versions
+        files = get_version_files(module, source)
+        print("Updating " + module.name + "...")
+        if files is None:
+            print("  Error finding manifest and project properties files")
+            return
+        manifest, project = files
+        if module.ret == COMPATIBLE:
+            versions = [versions[0].set(versions[0].increment()), versions[1] + 1, versions[2]]
+            set_specification(project, manifest, versions[0])
+            set_implementation(manifest, versions[1])
+            module.set_versions(versions)
+        elif module.ret == NON_COMPATIBLE:
+            versions = [versions[0].set(versions[0].overflow()), versions[1] + 1, versions[2] + 1]
+            set_specification(project, manifest, versions[0])
+            set_implementation(manifest, versions[1])
+            set_release(manifest, versions[2])
+            module.set_versions(versions)
+        elif module.ret == NO_CHANGES:
+            versions = [versions[0], versions[1] + 1, versions[2]]
+            set_implementation(manifest, versions[1])
+            module.set_versions(versions)
+        elif module.ret is None:
+            # Newly added module: reset to the defaults
+            versions = [Spec("1.0"), 1, 1]
+            set_specification(project, manifest, versions[0])
+            set_implementation(manifest, versions[1])
+            set_release(manifest, versions[2])
+            module.set_versions(versions)
+        sys.stdout.flush()
+
+# Given a list of the added modules, remove the modules
+# which already have the correct 'new module default' version numbers
+def remove_correct_added(modules):
+    correct = [x for x in modules]
+    for module in modules:
+        if module.spec() == "1.0" or module.spec() == "0.0":
+            if module.impl() == 1:
+                if module.release() == 1 or module.release() == 0:
+                    correct.remove(module)
+    return correct
+
+# ==================================== #
+#          Helper Functions            #
+# ==================================== #
+
+# Replace pattern with subst in the given file
+def replace(file, pattern, subst):
+    # Create a temp file
+    fh, abs_path = mkstemp()
+    new_file = open(abs_path, 'w')
+    old_file = open(file)
+    for line in old_file:
+        new_file.write(line.replace(pattern, subst))
+    # Close the temp file
+    new_file.close()
+    close(fh)
+    old_file.close()
+    # Remove the original file
+    remove(file)
+    # Move the new file into place
+    move(abs_path, file)
+
+# Given a list of modules, print the version numbers that need changing
+def print_version_updates(modules):
+    f = open("gen_version.txt", "a")
+    for module in modules:
+        versions = module.versions
+        if module.ret == COMPATIBLE:
+            output = (module.name + ":\n")
+            output += ("\tSpecification:\t" + str(versions[0]) + "\t->\t" + str(versions[0].increment()) + "\n")
+            output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n")
+            output += ("\tRelease:\tNo Change.\n")
+            output += ("\n")
+            print(output)
+            sys.stdout.flush()
+            f.write(output)
+        elif module.ret == NON_COMPATIBLE:
+            output = (module.name + ":\n")
+            output += ("\tSpecification:\t" + str(versions[0]) + "\t->\t" + str(versions[0].overflow()) + "\n")
+            output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n")
+            output += ("\tRelease:\t" + str(versions[2]) + "\t->\t" + str(versions[2] + 1) + "\n")
+            output += ("\n")
+            print(output)
+            sys.stdout.flush()
+            f.write(output)
+        elif module.ret == ERROR:
+            output = (module.name + ":\n")
+            output += ("\t*Unable to detect necessary changes\n")
+            output += ("\tSpecification:\t" + str(versions[0]) + "\n")
+            output += ("\tImplementation:\t" + str(versions[1]) + "\n")
+            output += ("\tRelease:\t\t" + str(versions[2]) + "\n")
+            output += ("\n")
+            print(output)
+            f.write(output)
+            sys.stdout.flush()
+        elif module.ret == NO_CHANGES:
+            output = (module.name + ":\n")
+            if versions[1] is None:
+                output += ("\tImplementation: None\n")
+            else:
+                output += ("\tImplementation:\t" + str(versions[1]) + "\t->\t" + str(versions[1] + 1) + "\n")
+            output += ("\n")
+            print(output)
+            sys.stdout.flush()
+            f.write(output)
+        elif module.ret is None:
+            output = ("Added " + module.name + ":\n")
+            if module.spec() != "1.0" and module.spec() != "0.0":
+                output += ("\tSpecification:\t" + str(module.spec()) + "\t->\t" + "1.0\n")
+                output += ("\n")
+            if module.impl() != 1:
+                output += ("\tImplementation:\t" + str(module.impl()) + "\t->\t" + "1\n")
+                output += ("\n")
+            if module.release() != 1 and module.release() != 0:
+                output += ("\tRelease:\t\t" + str(module.release()) + "\t->\t" + "1\n")
+                output += ("\n")
+            print(output)
+            sys.stdout.flush()
+            f.write(output)
+    sys.stdout.flush()
+    f.close()
+
+# Change cygwin paths to Windows ones.
+# NOTE: assumes the repository lives on the C: drive (/cygdrive/c/...)
+def fix_path(path):
+    if "cygdrive" in path:
+        new_path = path[11:]
+        return "C:/" + new_path
+    else:
+        return path
+
+# Print a 'title' with a line of dashes under it
+def printt(title):
+    print("\n" + title)
+    print("-" * len(title))
+    sys.stdout.flush()
+
+# Get a list of package names in the given path
+# The path is expected to be of the form {base}/module/src
+#
+# NOTE: We currently only check for packages of the form
+# org.sleuthkit.autopsy.x
+# If we add other namespaces for commercial modules we will
+# have to add a check here
+def get_packages(path):
+    packages = []
+    package_path = os.path.join(path, "org", "sleuthkit", "autopsy")
+    for folder in os.listdir(package_path):
+        package_string = "org.sleuthkit.autopsy."
+        packages.append(package_string + folder)
+    return packages
+
+# Create the given directory, if it doesn't already exist
+def make_dir(dir):
+    try:
+        if not os.path.isdir(dir):
+            os.mkdir(dir)
+        return os.path.isdir(dir)
+    except:
+        print("Exception thrown when creating directory")
+        return False
+
+# Delete the given directory, and make sure it is deleted
+def del_dir(dir):
+    try:
+        if os.path.isdir(dir):
+            shutil.rmtree(dir, ignore_errors=False, onerror=handleRemoveReadonly)
+            # rmtree can fail silently on read-only files; verify the directory is gone
+            return not os.path.isdir(dir)
+        return True
+    except:
+        print("Exception thrown when deleting directory")
+        traceback.print_exc()
+        return False
+
+# Handle any permission errors thrown by shutil.rmtree
+def handleRemoveReadonly(func, path, exc):
+    excvalue = exc[1]
+    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
+        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
+        func(path)
+    else:
+        raise
+
+# Run git clone and git checkout for the tag
+def do_git(tag, tag_dir):
+    try:
+        printt("Cloning Autopsy tag " + tag + " into dir " + tag_dir + " (this could take a while)...")
+        subprocess.call(["git", "clone", "https://github.com/sleuthkit/autopsy.git", tag_dir],
+                        stdout=subprocess.PIPE)
+        printt("Checking out tag " + tag + "...")
+        subprocess.call(["git", "checkout", tag],
+                        stdout=subprocess.PIPE,
+                        cwd=tag_dir)
+        return True
+    except Exception as ex:
+        print("Error cloning and checking out Autopsy: ", sys.exc_info()[0])
+        print(str(ex))
+        print("Make sure git is installed and on the PATH of the shell you are using.")
+        return False
+
+# Get the flags from argv; returns 1 on a parse error so main() prints the usage
+def args():
+    try:
+        sys.argv.pop(0)
+        while sys.argv:
+            arg = sys.argv.pop(0)
+            if arg == "-h" or arg == "--help":
+                return 1
+            elif arg == "-t" or arg == "--tag":
+                global tag
+                tag = sys.argv.pop(0)
+            elif arg == "-s" or arg == "--source":
+                global source
+                source = sys.argv.pop(0)
+            elif arg == "-d" or arg == "--dir":
+                global docdir
+                docdir = sys.argv.pop(0)
+            elif arg == "-a" or arg == "--auto":
+                global dry
+                dry = False
+            else:
+                raise Exception()
+    except:
+        # Unknown flag or missing value: fall through to the usage message
+        return 1
+
+# Print script run info
+def printinfo():
+    global tag
+    global source
+    global docdir
+    global dry
+    printt("Release script information:")
+    if source is None:
+        source = fix_path(os.path.abspath("."))
+    print("Using source directory:\n  " + source)
+    if tag is None:
+        tag = get_tag(source)
+    print("Checking out to tag:\n  " + tag)
+    if docdir is None:
+        docdir = fix_path(os.path.abspath("./jdiff-javadocs"))
+    print("Generating jdiff JavaDocs in:\n  " + docdir)
+    if dry:
+        print("Dry run: will not auto-update version numbers")
+    sys.stdout.flush()
+
+# Print the script's usage/help
+def usage():
+    return \
+    """
+    USAGE:
+      Compares the API of the current Autopsy source code with a previous
+      tagged version. By default, it will detect the previous tag from
+      the NEWS file and will not update the versions in the source code.
+
+    OPTIONAL FLAGS:
+      -t --tag      Specify a previous tag to compare to.
+                    Otherwise the NEWS file will be used.
+
+      -d --dir      The output directory for the jdiff JavaDocs. If no
+                    directory is given, the default is jdiff-javadocs/{module}.
+
+      -s --source   The directory containing Autopsy's source code.
+
+      -a --auto     Automatically update version numbers (not a dry run).
+
+      -h --help     Prints this usage.
+    """
+
+# ==================================== #
+#         Main Functionality           #
+# ==================================== #
+
+# Where the magic happens
+def main():
+    global tag
+    global source
+    global docdir
+    global dry
+    tag = None; source = None; docdir = None; dry = True
+
+    ret = args()
+    if ret:
+        print(usage())
+        return 0
+    printinfo()
+
+    # -----------------------------------------------
+    # 1) Clone Autopsy, checkout to given tag/commit
+    # 2) Get the modules in the clone and the source
+    # 3) Generate the xml comparison
+    # -----------------------------------------------
+    if not del_dir("./build/" + tag):
+        print("\n\n=========================================")
+        print(" Failed to delete previous Autopsy clone.")
+        print(" Unable to continue...")
+        print("=========================================")
+        return 1
+    tag_dir = os.path.abspath("./build/" + tag)
+    if not do_git(tag, tag_dir):
+        return 1
+    sys.stdout.flush()
+
+    tag_modules = find_modules(tag_dir)
+    source_modules = find_modules(source)
+
+    printt("Generating jdiff XML reports...")
+    apiname_tag = tag
+    apiname_cur = "current"
+    gen_xml(tag_dir, tag_modules, apiname_tag)
+    gen_xml(source, source_modules, apiname_cur)
+
+    printt("Deleting cloned Autopsy directory...")
+    print("Clone successfully deleted" if del_dir(tag_dir) else "Failed to delete clone")
+    sys.stdout.flush()
+
+    # -----------------------------------------------------
+    # 1) Separate modules into added, similar, and removed
+    # 2) Compare XML for each module
+    # -----------------------------------------------------
+    printt("Comparing modules found...")
+    similar_modules, added_modules, removed_modules = module_diff(source_modules, tag_modules)
+    if added_modules or removed_modules:
+        for m in added_modules:
+            print("+ Added " + m.name)
+            sys.stdout.flush()
+        for m in removed_modules:
+            print("- Removed " + m.name)
+            sys.stdout.flush()
+    else:
+        print("No added or removed modules")
+        sys.stdout.flush()
+
+    printt("Comparing jdiff outputs...")
+    for module in similar_modules:
+        module.set_ret(compare_xml(module, apiname_tag, apiname_cur))
+    print("Refer to the jdiff-javadocs folder for more details")
+
+    # ------------------------------------------------------------
+    # 1) Do versioning
+    # 2) Auto-update version numbers in files and the_modules list
+    # 3) Auto-update dependencies
+    # ------------------------------------------------------------
+    printt("Auto-detecting version numbers and changes...")
+    for module in added_modules:
+        module.set_versions(get_versions(module, source))
+    for module in similar_modules:
+        module.set_versions(get_versions(module, source))
+
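+    # Added modules that already carry the 'new module default' versions
+    # (spec 1.0/0.0, impl 1, release 1/0) are filtered out below so they are
+    # not reported or rewritten.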
+    added_modules = remove_correct_added(added_modules)
+    the_modules = similar_modules + added_modules
+    print_version_updates(the_modules)
+
+    if not dry:
+        printt("Auto-updating version numbers...")
+        update_versions(the_modules, source)
+        print("All auto-updates complete")
+
+        printt("Detecting and auto-updating dependencies...")
+        update_dependencies(the_modules, source)
+
+    printt("Deleting jdiff XML...")
+    xml_dir = os.path.abspath("./build/jdiff-xml")
+    print("XML successfully deleted" if del_dir(xml_dir) else "Failed to delete XML")
+
+    print("\n--- Script completed successfully ---")
+    return 0
+
+# Start off the script
+if __name__ == "__main__":
+    sys.exit(main())
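+
+# For reference (paths as used above; the script file name itself is illustrative):
+#
+# Typical invocations:
+#   python update_versions.py                       # dry run, tag taken from NEWS.txt
+#   python update_versions.py -t autopsy-3.0.0 -a   # compare to a given tag, then auto-update
+#
+# Working-tree layout this script expects and produces:
+#   ./thirdparty/jdiff/v-custom/jdiff.jar   custom jdiff doclet run via javadoc
+#   ./build/jdiff-xml/                      per-module API xml (deleted at the end)
+#   ./jdiff-javadocs/<module>/              comparison reports (override with -d)
+#   ./jdiff-logs/                           javadoc/jdiff logs per module
+#   ./gen_version.txt                       appended summary of version bumps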