diff --git a/.gitignore b/.gitignore
index 0b1b20d0d86e0d82aa0c1089164e960f414968f8..878e12c33b52a54f235f173cf923395fa7dbbe95 100755
--- a/.gitignore
+++ b/.gitignore
@@ -82,6 +82,7 @@ rejistry++/msvcpp/rejistry++/ipch
 
 # Release files
 release/sleuthkit-*
+release/clone
 
 # IntelliSense data
 /win32/*.ncb
@@ -116,6 +117,7 @@ Makefile
 stamp-h1
 tsk/tsk_config.h
 tsk/tsk_incs.h
+tsk/tsk.pc
 aclocal.m4
 autom4te.cache
 config.log
@@ -142,6 +144,7 @@ tests/fs_thread_test
 tests/read_apis
 tools/autotools/tsk_comparedir
 tools/autotools/tsk_gettimes
+tools/autotools/tsk_imageinfo
 tools/autotools/tsk_loaddb
 tools/autotools/tsk_recover
 tools/fiwalk/plugins/jpeg_extract
@@ -190,6 +193,7 @@ unit_tests/base/test_base
 *.E01
 *.vmdk
 
+sleuthkit-*.tar.gz
 
 #Test data folder
 
diff --git a/INSTALL.txt b/INSTALL.txt
index 42436279d2c777a739a7575ad4c21c5b9e6a742e..66eab8b840e31bc6b92cc28de26d7c07fa5258ca 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -3,7 +3,7 @@
 
                     Installation Instructions
 
-                     Last Modified: Oct 2012
+                     Last Modified: Oct 2022
 
 
 REQUIREMENTS
@@ -51,9 +51,31 @@ currently support.  You can download it from:
     The official repository is available here, but there is not
     a package of the last stable release:
 
-    https://github.com/libyal/libewf
+    https://github.com/libyal/libewf-legacy
     Available at: http://sourceforge.net/projects/libewf/
 
+- Libvhdi: Allows you to process disk images that are stored in the
+Virtual Hard Disk format (VHD).  
+
+    The official repository is available here:
+
+    https://github.com/libyal/libvhdi
+
+- Libvmdk: Allows you to process disk images that are stored in the
+VMware Virtual Disk format (VMDK).  
+
+    The official repository is available here:
+
+    https://github.com/libyal/libvmdk
+
+- Libvslvm: Allows you to access the Linux Logical Volume Manager (LVM) format 
+that is stored on a disk image.  A stand-alone version of libbfio is needed 
+to allow libvslvm to directly read from a TSK_IMAGE.  
+
+    The official repository is available here:
+
+    https://github.com/libyal/libvslvm
+    https://github.com/libyal/libbfio
 
 
 INSTALLATION
@@ -124,6 +146,34 @@ if it is installed.
 the libewf installation (the directory should have 'lib' and 'include'
 directories in it).
 
+--without-libvhdi: Supply this if you want TSK to ignore libvhdi even
+if it is installed.
+
+--with-libvhdi=dir: Supply this if you want TSK to look in 'dir' for
+the libvhdi installation (the directory should have 'lib' and 'include'
+directories in it).
+
+--without-libvmdk: Supply this if you want TSK to ignore libvmdk even
+if it is installed.
+
+--with-libvmdk=dir: Supply this if you want TSK to look in 'dir' for
+the libvmdk installation (the directory should have 'lib' and 'include'
+directories in it).
+
+--without-libvslvm: Supply this if you want TSK to ignore libvslvm even
+if it is installed.
+
+--with-libvslvm=dir: Supply this if you want TSK to look in 'dir' for
+the libvslvm installation (the directory should have 'lib' and 'include'
+directories in it).
+
+--without-libbfio: Supply this if you want TSK to ignore libbfio even
+if it is installed.
+
+--with-libbfio=dir: Supply this if you want TSK to look in 'dir' for
+the libbfio installation (the directory should have 'lib' and 'include'
+directories in it).
+
 -----------------------------------------------------------------------------
 
 Brian Carrier
diff --git a/Makefile.am b/Makefile.am
index b82318567563d11ac532d69831ff5783753dfbad..7da8eb4da5a9ba2ad0f8f6ecff74245f36228f51 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -24,9 +24,11 @@ EXTRA_DIST = README_win32.txt README.md INSTALL.txt ChangeLog.txt NEWS.txt API-C
     bindings/java/src/org/sleuthkit/datamodel/Examples/*.java \
     bindings/java/src/*.html \
     case-uco/java/*.xml \
+    case-uco/java/*.md \
     case-uco/java/nbproject/*.xml \
     case-uco/java/nbproject/*.properties \
-    case-uco/java/src/org/sleuthkit/caseuco/*.java
+    case-uco/java/src/org/sleuthkit/caseuco/*.java \
+    case-uco/java/test/org/sleuthkit/caseuco/*.java
 
 ACLOCAL_AMFLAGS = -I m4
 
@@ -48,19 +50,20 @@ SUBDIRS = tsk tools tests samples man $(UNIT_TESTS) $(JAVA_BINDINGS) $(JAVA_CASE
 
 nobase_include_HEADERS = tsk/libtsk.h tsk/tsk_incs.h \
     tsk/base/tsk_base.h tsk/base/tsk_os.h \
-    tsk/img/tsk_img.h tsk/vs/tsk_vs.h tsk/img/pool.hpp \
+    tsk/img/tsk_img.h tsk/vs/tsk_vs.h tsk/img/pool.hpp tsk/img/logical_img.h \
     tsk/vs/tsk_bsd.h tsk/vs/tsk_dos.h tsk/vs/tsk_gpt.h \
     tsk/vs/tsk_mac.h tsk/vs/tsk_sun.h \
     tsk/fs/tsk_fs.h tsk/fs/tsk_ffs.h tsk/fs/tsk_ext2fs.h tsk/fs/tsk_fatfs.h \
-    tsk/fs/tsk_ntfs.h tsk/fs/tsk_iso9660.h tsk/fs/tsk_hfs.h tsk/fs/tsk_yaffs.h \
+    tsk/fs/tsk_ntfs.h tsk/fs/tsk_iso9660.h tsk/fs/tsk_hfs.h tsk/fs/tsk_yaffs.h tsk/fs/tsk_logical_fs.h \
     tsk/fs/tsk_apfs.h tsk/fs/tsk_apfs.hpp tsk/fs/apfs_fs.h tsk/fs/apfs_fs.hpp tsk/fs/apfs_compat.hpp \
     tsk/fs/decmpfs.h tsk/fs/tsk_exfatfs.h tsk/fs/tsk_fatxxfs.h \
     tsk/hashdb/tsk_hashdb.h tsk/auto/tsk_auto.h \
     tsk/auto/tsk_is_image_supported.h tsk/auto/guid.h \
     tsk/pool/tsk_pool.h tsk/pool/tsk_pool.hpp tsk/pool/tsk_apfs.h tsk/pool/tsk_apfs.hpp \
 	tsk/pool/pool_compat.hpp tsk/pool/apfs_pool_compat.hpp \
+    tsk/pool/lvm_pool_compat.hpp \
     tsk/util/crypto.hpp tsk/util/lw_shared_ptr.hpp tsk/util/span.hpp \
-    tsk/util/detect_encryption.h
+    tsk/util/detect_encryption.h tsk/util/file_system_utils.h
 
 nobase_dist_data_DATA = tsk/sorter/default.sort tsk/sorter/freebsd.sort \
     tsk/sorter/images.sort tsk/sorter/linux.sort tsk/sorter/openbsd.sort \
diff --git a/NEWS.txt b/NEWS.txt
index e6ad6237e2015f7f5a733786f9a824137ee6871c..77b01edc755b709434f9dcc5007e66f6ffa2dcec 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -1,3 +1,42 @@
+---------------- VERSION 4.12.1 --------------
+C/C++:
+- Bug fixes from Luis Nassif and Joachim Metz
+- Added a check to stop processing very large folders, to prevent memory exhaustion
+
+Java:
+- Added File Repository concept for files to be stored in another location
+- Schema updated to 9.4
+- Fixed OS Account merge bug and now fire events when accounts are merged
+
+
+---------------- VERSION 4.12.0 --------------
+- There was a 1-year gap since 4.11.1 and the git log has 441 commits in that timeframe. 
+- Many were small fixes.
+- This set of release notes is much more of an overview than other releases
+
+What's New:
+- LVM Support (non-Windows) from Joachim Metz
+- Logical File System support (a folder structure is parsed by TSK libraries) from Ann Priestman (Basis)
+
+What's Changed:
+- Lots of bug fixes from the Basis team and Joachim Metz
+- Additional fixes from Eran-YT, msuhanov, Joel Uckelman, Aleks L, dschoemantruter
+- General themes of C/C++ bounds checks and Java improvements to OS Accounts, Ingest jobs, CaseDbAccessManager, and much more.
+
+
+
+---------------- VERSION 4.11.1 --------------
+
+C/C++:
+- Several fixes from Joachim Metz
+- NTFS Decompression bug fix from Kim Stone and Joel Uckelman
+
+Java:
+- Fixed connection leak when making OS Accounts in bridge
+- OsAccount updates for instance types and special Windows SIDs
+- Fixed issue with duplicate value in Japanese timeline translation
+
+
 ---------------- VERSION 4.11.0 --------------
 C/C++:
 - Added checks at various layers to detect encrypted file systems and disks to give more useful error messages.
diff --git a/README.md b/README.md
index ff66ccf0beae568ebcbe1a3db96d3eb3429e7ff4..893d6f3dca8446c2c9ff424f1257d9872ec0094d 100644
--- a/README.md
+++ b/README.md
@@ -15,8 +15,8 @@ the tool or customize it to specific needs.
 The Sleuth Kit uses code from the file system analysis tools of
 The Coroner's Toolkit (TCT) by Wietse Venema and Dan Farmer.  The
 TCT code was modified for platform independence.  In addition,
-support was added for the NTFS (see [wiki/ntfs](http://wiki.sleuthkit.org/index.php?title=FAT_Implementation_Notes)) 
-and FAT (see [wiki/fat](http://wiki.sleuthkit.org/index.php?title=NTFS_Implementation_Notes)) file systems.  Previously, The Sleuth Kit was
+support was added for the NTFS (see [wiki/ntfs](http://wiki.sleuthkit.org/index.php?title=NTFS_Implementation_Notes)) 
+and FAT (see [wiki/fat](http://wiki.sleuthkit.org/index.php?title=FAT_Implementation_Notes)) file systems.  Previously, The Sleuth Kit was
 called The @stake Sleuth Kit (TASK).  The Sleuth Kit is now independent
 of any commercial or academic organizations.
 
diff --git a/appveyor.yml b/appveyor.yml
index 5afea87227a05d12d7518ea85f4632519ad7d66b..b6a90b6476c3cdb0511fc83965a23e2f73919777 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,36 +1,76 @@
 version: 4.6.0.{build}
 
-cache:
-  - C:\Users\appveyor\.ant
-  - C:\ProgramData\chocolatey\bin
-  - C:\ProgramData\chocolatey\lib
-
-image: Visual Studio 2015
-
-install:
-  - ps: choco install nuget.commandline
-  - ps: choco install ant --ignore-dependencies
-  - ps: $env:Path="C:\Program Files\Java\jdk1.8.0\bin;$($env:Path);C:\ProgramData\chocolatey\lib\ant"
-  - set PATH=C:\Python36-x64\';%PATH%
-environment:
-  global:
-    TSK_HOME: "%APPVEYOR_BUILD_FOLDER%"
-    PYTHON: "C:\\Python36-x64"
-    JDK_HOME: C:\Program Files\Java\jdk1.8.0
-services:
-
-before_build:
-  - nuget restore win32\libtsk -PackagesDirectory win32\packages
-
-build_script:
-  - python win32\updateAndBuildAll.py -m
-  - ps: ant -version
-  - ps: pushd bindings/java
-  - cmd: ant -q dist
-  - ps: popd
-  - ps: pushd case-uco/java
-  - cmd: ant -q
-  - ps: popd
-
-test_script:
-  - cmd: ant -q -f bindings/java test
+environment: 
+  matrix:
+
+  - job_name: Windows Build
+    appveyor_build_worker_image: Visual Studio 2015
+  - job_name: Linux Build
+    appveyor_build_worker_image: Ubuntu
+  - job_name: macOS Build
+    appveyor_build_worker_image: macos-catalina
+
+matrix:
+  fast_finish: true
+
+
+# job-specific configurations
+for: 
+
+  - 
+    matrix:
+      only:
+        - job_name: Windows Build
+
+    cache:
+      - C:\Users\appveyor\.ant
+      - C:\ProgramData\chocolatey\bin
+      - C:\ProgramData\chocolatey\lib
+
+    install:
+      - ps: choco install nuget.commandline
+      - ps: choco install ant --ignore-dependencies
+      - ps: $env:Path="C:\Program Files\Java\jdk1.8.0\bin;$($env:Path);C:\ProgramData\chocolatey\lib\ant"
+      - set PATH=C:\Python36-x64\';%PATH%
+    environment:
+      global:
+        TSK_HOME: "%APPVEYOR_BUILD_FOLDER%"
+        PYTHON: "C:\\Python36-x64"
+        JDK_HOME: C:\Program Files\Java\jdk1.8.0
+    services:
+
+    before_build:
+      - nuget restore win32\libtsk -PackagesDirectory win32\packages
+
+    build_script:
+      - python win32\updateAndBuildAll.py -m
+      - ps: ant -version
+      - ps: pushd bindings/java
+      - cmd: ant -q dist
+      - ps: popd
+      - ps: pushd case-uco/java
+      - cmd: ant -q
+      - ps: popd
+
+    test_script:
+      - cmd: ant -q -f bindings/java test
+
+  - 
+    matrix:
+      only:
+        - job_name: Linux Build
+
+    build_script:
+      - ./bootstrap
+      - ./configure -q
+      - make -s
+
+  - 
+    matrix:
+      only:
+        - job_name: macOS Build
+
+    build_script:
+      - ./bootstrap
+      - ./configure -q
+      - make -s
diff --git a/bindings/java/build.xml b/bindings/java/build.xml
index ab734e61205edcd5a75998146471f1590c647568..2cb327329a73a199df5e1be71766de8ea54e99da 100644
--- a/bindings/java/build.xml
+++ b/bindings/java/build.xml
@@ -11,7 +11,7 @@
 	<import file="build-${os.family}.xml"/>
 
     <!-- Careful changing this because release-windows.pl updates it by pattern -->
-<property name="VERSION" value="4.11.0"/>
+<property name="VERSION" value="4.12.1"/>
 
 	<!-- set global properties for this build -->
 	<property name="default-jar-location" location="/usr/share/java"/>
@@ -119,7 +119,7 @@
 	</target>
 
 	<target name="compile-test" depends="compile" description="compile the tests">
-		<javac debug="on" srcdir="${test}" destdir="${build}" includeantruntime="false">
+		<javac encoding="iso-8859-1" debug="on" srcdir="${test}" destdir="${build}" includeantruntime="false">
 			<classpath refid="libraries"/>
 			<compilerarg value="-Xlint" />
 		</javac>
@@ -127,7 +127,7 @@
 
 	<target name="compile" depends="init, set-library-path, retrieve-deps" description="compile the source">
 		<!-- Compile the java code from ${src} into ${build} -->
-		<javac debug="on" srcdir="${src}" destdir="${build}" classpathref="libraries" includeantruntime="false">
+		<javac encoding="iso-8859-1" debug="on" srcdir="${src}" destdir="${build}" classpathref="libraries" includeantruntime="false">
 			<compilerarg value="-Xlint"/>
 		</javac>
 
@@ -137,7 +137,7 @@
 		</copy>
 		
 		<!-- Verify sample compiles -->
-		<javac debug="on" srcdir="${sample}" destdir="${build}" includeantruntime="false">
+		<javac encoding="iso-8859-1" debug="on" srcdir="${sample}" destdir="${build}" includeantruntime="false">
 			<classpath refid="libraries"/>
 		</javac>
 		
diff --git a/bindings/java/doxygen/Doxyfile b/bindings/java/doxygen/Doxyfile
index b20fc027b6c600a56253ada19199495d3e51d26f..6da9ad6abb31e6fff81e6f7d98ad4dc27260ecc4 100644
--- a/bindings/java/doxygen/Doxyfile
+++ b/bindings/java/doxygen/Doxyfile
@@ -39,7 +39,7 @@ PROJECT_NAME           = "Sleuth Kit Java Bindings (JNI)"
 # control system is used.
 
 # NOTE: This is updated by the release-unix.pl script
-PROJECT_NUMBER = 4.11.0
+PROJECT_NUMBER = 4.12.1
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -1056,7 +1056,7 @@ GENERATE_HTML          = YES
 # This tag requires that the tag GENERATE_HTML is set to YES.
 
 # NOTE: This is updated by the release-unix.pl script
-HTML_OUTPUT = jni-docs/4.11.0/
+HTML_OUTPUT = jni-docs/4.12.1/
 
 # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
 # generated HTML page (for example: .htm, .php, .asp).
diff --git a/bindings/java/doxygen/artifact_catalog.dox b/bindings/java/doxygen/artifact_catalog.dox
index 41a666328a03aa16346449faac896f98e8acee7d..8bf2fcb6997187159772dbd7070012e7dc56784d 100644
--- a/bindings/java/doxygen/artifact_catalog.dox
+++ b/bindings/java/doxygen/artifact_catalog.dox
@@ -26,6 +26,7 @@ Describes how a data source was used, e.g., as a SIM card or an OS drive (such a
 ### REQUIRED ATTRIBUTES
 - TSK_DESCRIPTION (Description of the usage, e.g., "OS Drive (Windows Vista)").
 
+
 ---
 ## TSK_ENCRYPTION_DETECTED
 An indication that the content is encrypted.
@@ -33,6 +34,7 @@ An indication that the content is encrypted.
 ### REQUIRED ATTRIBUTES
 - TSK_COMMENT (A comment on the encryption, e.g., encryption type or password)
 
+
 ---
 ## TSK_ENCRYPTION_SUSPECTED
 An indication that the content is likely encrypted.
@@ -40,6 +42,7 @@ An indication that the content is likely encrypted.
 ### REQUIRED ATTRIBUTES
 - TSK_COMMENT (Reason for suspecting encryption)
 
+
 ---
 ## TSK_EXT_MISMATCH_DETECTED
 An indication that the registered extensions for a file's mime type do not match the file's extension.
@@ -47,6 +50,7 @@ An indication that the registered extensions for a file's mime type do not match
 ### REQUIRED ATTRIBUTES
 None
 
+
 ---
 ## TSK_FACE_DETECTED
 An indication that a human face was detected in some content.
@@ -54,6 +58,7 @@ An indication that a human face was detected in some content.
 ### REQUIRED ATTRIBUTES
 None
 
+
 ---
 ## TSK_HASHSET_HIT
 Indicates that the MD5 hash of a file matches a set of known MD5s (possibly user defined).
@@ -64,28 +69,19 @@ Indicates that the MD5 hash of a file matches a set of known MD5s (possibly user
 ### OPTIONAL ATTRIBUTES
 - TSK_COMMENT (Additional comments about the hit)
 
+
 ---
-## TSK_INTERESTING_ARTIFACT_HIT
-Indicates that the source artifact matches some set of criteria which deem it interesting. Artifacts with this meta artifact will be brought to the attention of the user.
+## TSK_INTERESTING_ITEM
+Indicates that the source item matches some set of criteria which deem it interesting. Items with this meta artifact will be brought to the attention of the user.
 
 ### REQUIRED ATTRIBUTES
-- TSK_ASSOCIATED_ARTIFACT (The source artifact)
-- TSK_SET_NAME (The name of the set of criteria which deemed this artifact interesting)
+- TSK_SET_NAME (The name of the set of criteria which deemed this item interesting)
 
 ### OPTIONAL ATTRIBUTES
-- TSK_COMMENT (Comment on the reason that the source artifact is interesting)
+- TSK_COMMENT (Comment on the reason that the source item is interesting)
 - TSK_CATEGORY (The set membership rule that was satisfied)
+- TSK_ASSOCIATED_ARTIFACT (The source artifact when the source item is an artifact)
 
----
-## TSK_INTERESTING_FILE_HIT
-Indication that the source file matches some set of criteria (possibly user defined) which deem it interesting. Files with this artifact will be brought to the attention of the user.
-
-### REQUIRED ATTRIBUTES
-- TSK_SET_NAME (The name of the set of criteria which deemed this file interesting)
-
-### OPTIONAL ATTRIBUTES
-- TSK_COMMENT (Comment on the reason that the source artifact is interesting)
-- TSK_CATEGORY (The set membership rule that was satisfied. I.e. a particular mime)
 
 ---
 ## TSK_KEYWORD_HIT
@@ -101,6 +97,7 @@ Indication that the source artifact or file contains a keyword. Keywords are gro
 ### OPTIONAL ATTRIBUTES
 - TSK_KEYWORD_PREVIEW (Snippet of text around keyword)
 
+
 ---
 ## TSK_OBJECT_DETECTED
 Indicates that an object was detected in a media file. Typically used by computer vision software to classify images.
@@ -111,6 +108,36 @@ Indicates that an object was detected in a media file. Typically used by compute
 ### OPTIONAL ATTRIBUTES
 - TSK_DESCRIPTION (Additional comments about the object or observer, e.g., what detected the object)
 
+
+---
+## TSK_PREVIOUSLY_NOTABLE
+Indicates that the file or artifact was previously tagged as "Notable" in another Autopsy case.
+
+### REQUIRED ATTRIBUTES
+- TSK_CORRELATION_TYPE (The correlation type that was previously tagged as notable)
+- TSK_CORRELATION_VALUE (The correlation value that was previously tagged as notable)
+- TSK_OTHER_CASES (The list of cases containing this file or artifact at the time the artifact is created)
+
+
+---
+## TSK_PREVIOUSLY_SEEN
+Indicates that the file or artifact was previously seen in another Autopsy case.
+
+### REQUIRED ATTRIBUTES
+- TSK_CORRELATION_TYPE (The correlation type that was previously seen)
+- TSK_CORRELATION_VALUE (The correlation value that was previously seen)
+- TSK_OTHER_CASES (The list of cases containing this file or artifact at the time the artifact is created)
+
+
+---
+## TSK_PREVIOUSLY_UNSEEN
+Indicates that the file or artifact was previously unseen in another Autopsy case.
+
+### REQUIRED ATTRIBUTES
+- TSK_CORRELATION_TYPE (The correlation type that was not previously seen)
+- TSK_CORRELATION_VALUE (The correlation value that was not previously seen)
+
+
 ---
 ## TSK_USER_CONTENT_SUSPECTED
 An indication that some media file content was generated by the user.
@@ -118,6 +145,7 @@ An indication that some media file content was generated by the user.
 ### REQUIRED ATTRIBUTES
 - TSK_COMMENT (The reason why user-generated content is suspected)
 
+
 ---
 ## TSK_VERIFICATION_FAILED
 An indication that some data did not pass verification. One example would be verifying a SHA-1 hash.
@@ -125,6 +153,7 @@ An indication that some data did not pass verification. One example would be ver
 ### REQUIRED ATTRIBUTES
 - TSK_COMMENT (Reason for failure, what failed)
 
+
 ---
 ## TSK_WEB_ACCOUNT_TYPE
 A web account type entry. 
@@ -134,6 +163,7 @@ A web account type entry.
 - TSK_TEXT (Indicates type of account (admin/moderator/user) and possible platform)
 - TSK_URL (URL indicating the user has an account on this domain)
 
+
 ---
 ## TSK_WEB_CATEGORIZATION
 The categorization of a web host using a specific usage type, e.g. mail.google.com would correspond to Web Email.
@@ -143,6 +173,7 @@ The categorization of a web host using a specific usage type, e.g. mail.google.c
 - TSK_DOMAIN (The domain of the host, e.g. google.com)
 - TSK_HOST (The full host, e.g. mail.google.com)
 
+
 ---
 ## TSK_YARA_HIT
 Indicates that the some content of the file was a hit for a YARA rule match.
@@ -151,20 +182,11 @@ Indicates that the some content of the file was a hit for a YARA rule match.
 - TSK_RULE (The rule that was a hit for this file)
 - TSK_SET_NAME (Name of the rule set containing the matching rule YARA rule)
 
+
 ---
 ## TSK_METADATA_EXIF
 EXIF metadata found in an image or audio file.
 
-### REQUIRED ATTRIBUTES
-- At least one of:
-- TSK_DATETIME_CREATED (Creation date of the file, in seconds since 1970-01-01T00:00:00Z)
-- TSK_DEVICE_MAKE (Device make, generally the manufacturer, e.g., Apple)
-- TSK_DEVICE_MODEL (Device model, generally the product, e.g., iPhone)
-- TSK_GEO_ALTITUDE (The camera's altitude when the image/audio was taken)
-- TSK_GEO_LATITUDE (The camera's latitude when the image/audio was taken)
-- TSK_GEO_LONGITUDE (The camera's longitude when the image/audio was taken)## TSK_METADATA_EXIF
-EXIF metadata found in an image or audio file.
-
 ### REQUIRED ATTRIBUTES
 - At least one of:
 - TSK_DATETIME_CREATED (Creation date of the file, in seconds since 1970-01-01T00:00:00Z)
@@ -192,8 +214,8 @@ TSK_CARD_NUMBER (Credit card number)
 - TSK_KEYWORD_SEARCH_DOCUMENT_ID (Document ID of the Solr document that contains the TSK_CARD_NUMBER when the account is a credit card discovered by the Autopsy regular expression search for credit cards)
 - TSK_SET_NAME (The keyword list name, i.e., "Credit Card Numbers", when the account is a credit card discovered by the Autopsy regular expression search for credit cards)
 
----
 
+---
 ## TSK_ASSOCIATED_OBJECT
 Provides a backwards link to an artifact that references the parent file of this artifact.  Example usage is that a downloaded file will have this artifact and it will point back to the TSK_WEB_DOWNLOAD artifact that is associated with a browser's SQLite database. See \ref jni_bb_associated_object.
 
@@ -212,7 +234,6 @@ Details about System/aplication/file backups.
 - TSK_DATETIME_END (Date/Time the backup ended)
 
 
-
 ---
 ## TSK_BLUETOOTH_ADAPTER
 Details about a Bluetooth adapter.
@@ -252,7 +273,6 @@ A calendar entry in an application file or database.
 - TSK_DATETIME_END (End of the entry, in seconds since 1970-01-01T00:00:00Z)
 
 
-
 ---
 ## TSK_CALLLOG
 A call log record in an application file or database.
@@ -270,7 +290,6 @@ A call log record in an application file or database.
 - TSK_NAME (The name of the caller or callee)
 
 
-
 ---
 ## TSK_CLIPBOARD_CONTENT
 Data found on the operating system's clipboard.
@@ -279,7 +298,6 @@ Data found on the operating system's clipboard.
 - TSK_TEXT (Text on the clipboard)
 
 
-
 ---
 ## TSK_CONTACT
 A contact book entry in an application file or database.
@@ -300,8 +318,6 @@ A contact book entry in an application file or database.
 - TSK_URL (e.g., the URL of an image if the contact is a vCard)
 
 
-
-
 ---
 ## TSK_DELETED_PROG
 Programs that have been deleted from the system.
@@ -314,7 +330,6 @@ Programs that have been deleted from the system.
 - TSK_PATH (Location where the program resided before being deleted)
 
 
-
 ---
 ## TSK_DEVICE_ATTACHED
 Details about a device that was physically attached to a data source.
@@ -329,7 +344,6 @@ Details about a device that was physically attached to a data source.
 - TSK_MAC_ADDRESS (Mac address of the attached device)
 
 
-
 ---
 ## TSK_DEVICE_INFO
 Details about a device data source.
@@ -341,7 +355,6 @@ Details about a device data source.
 - TSK_IMSI (IMSI number of the device)
 
 
-
 ---
 ## TSK_EMAIL_MSG
 An email message found in an application file or database.
@@ -364,6 +377,7 @@ An email message found in an application file or database.
 - TSK_SUBJECT (Subject of the email message)
 - TSK_THREAD_ID (ID specified by the analysis module to group emails into threads for display purposes)
 
+
 ---
 ## TSK_EXTRACTED_TEXT
 Text extracted from some content.
@@ -371,6 +385,7 @@ Text extracted from some content.
 ### REQUIRED ATTRIBUTES
 - TSK_TEXT (The extracted text)
 
+
 ---
 ## TSK_GEN_INFO
 A generic information artifact. Each content object will have at most one TSK_GEN_INFO artifact, which is easily accessed through org.sleuthkit.datamodel.AbstractContent.getGenInfoArtifact() and related methods. The TSK_GEN_INFO object is useful for storing values related to the content object without making a new artifact type.
@@ -379,7 +394,8 @@ A generic information artifact. Each content object will have at most one TSK_GE
 None
 
 ### OPTIONAL ATTRIBUTES
-- TSK_PHOTODNA_HASH (The PhotoDNA hash of an image)
+- TSK_HASH_PHOTODNA (The PhotoDNA hash of an image)
+
 
 ---
 ## TSK_GPS_AREA
@@ -393,6 +409,7 @@ An outline of an area.
 - TSK_NAME (Name of the area, e.g., Minute Man Trail)
 - TSK_PROG_NAME (Name of the application that was the source of the GPS route)
 
+
 ---
 ## TSK_GPS_BOOKMARK
 A bookmarked GPS location or saved waypoint.
@@ -409,7 +426,6 @@ A bookmarked GPS location or saved waypoint.
 - TSK_PROG_NAME (Name of the application that was the source of the GPS bookmark)
 
 
-
 ---
 ## TSK_GPS_LAST_KNOWN_LOCATION
 The last known location of a GPS connected device. This may be from a perspective other than the device.
@@ -425,7 +441,6 @@ The last known location of a GPS connected device. This may be from a perspectiv
 - TSK_NAME (The name of the last known location. Ex: Boston)
 
 
-
 ---
 ## TSK_GPS_ROUTE
 A GPS route.
@@ -440,7 +455,6 @@ A GPS route.
 - TSK_PROG_NAME (Name of the application that was the source of the GPS route)
 
 
-
 ---
 ## TSK_GPS_SEARCH
 A GPS location that was known to have been searched by the device or user.
@@ -456,7 +470,6 @@ A GPS location that was known to have been searched by the device or user.
 - TSK_NAME (The name of the target location, e.g., Boston)
 
 
-
 ---
 ## TSK_GPS_TRACK
 A Global Positioning System (GPS) track artifact records the track, or path, of a GPS-enabled dvice as a connected series of track points. A track point is a location in a geographic coordinate system with latitude, longitude and altitude (elevation) axes.
@@ -469,7 +482,6 @@ A Global Positioning System (GPS) track artifact records the track, or path, of
 - TSK_PROG_NAME (Name of application containing the GPS trackpoint set)
 
 
-
 ---
 ## TSK_INSTALLED_PROG
 Details about an installed program. 
@@ -484,6 +496,7 @@ Details about an installed program.
 - TSK_PERMISSIONS (Permissions of the installed program)
 - TSK_VERSION (Version number of the program)
 
+
 ---
 ## TSK_MESSAGE
 A message that is found in some content.
@@ -506,7 +519,6 @@ A message that is found in some content.
 - TSK_THREAD_ID (ID for keeping threaded messages together)
 
 
-
 ---
 ## TSK_METADATA
 General metadata for some content.
@@ -525,6 +537,7 @@ None
 - TSK_USER_ID (Last author of the document)
 - TSK_VERSION (Version number of the program used to create the document)
 
+
 ---
 ## TSK_OS_INFO
 Details about an operating system recovered from the data source.
@@ -545,7 +558,6 @@ Details about an operating system recovered from the data source.
 - TSK_VERSION (Version of the OS)
 
 
-
 ---
 ## TSK_PROG_NOTIFICATIONS
 Notifications to the user.
@@ -559,7 +571,6 @@ Notifications to the user.
 - TSK_VALUE (Message being sent or received)
 
 
-
 ---
 ## TSK_PROG_RUN
 The number of times a program/application was run.
@@ -577,7 +588,6 @@ The number of times a program/application was run.
 - TSK_PATH (Path of the executable program)
 
 
-
 ---
 ## TSK_RECENT_OBJECT
 Indicates recently accessed content. Examples: Recent Documents or Recent Downloads menu items on Windows.
@@ -594,7 +604,6 @@ Indicates recently accessed content. Examples: Recent Documents or Recent Downlo
 - TSK_COMMENT (What the source of the attribute may be)
 
 
-
 ---
 ## TSK_REMOTE_DRIVE
 Details about a remote drive found in the data source.
@@ -606,7 +615,6 @@ Details about a remote drive found in the data source.
 - TSK_LOCAL_PATH (The local path of this remote drive. This path may be mapped, e.g., 'D:/' or 'F:/')
 
 
-
 ---
 ## TSK_SCREEN_SHOTS
 Screenshots from a device or application.
@@ -642,7 +650,6 @@ An application or web user account.
 - TSK_USER_NAME (User name of the service account)
 
 
-
 ---
 ## TSK_SIM_ATTACHED
 Details about a SIM card that was physically attached to the device.
@@ -653,7 +660,6 @@ Details about a SIM card that was physically attached to the device.
 - TSK_IMSI (IMSI number of this SIM card)
 
 
-
 ---
 ## TSK_SPEED_DIAL_ENTRY
 A speed dial entry.
@@ -666,7 +672,6 @@ A speed dial entry.
 - TSK_SHORTCUT (Keyboard shortcut)
 
 
-
 ---
 ## TSK_TL_EVENT
 An event in the timeline of a case.
@@ -676,6 +681,7 @@ An event in the timeline of a case.
 - TSK_DATETIME (When the event occurred, in seconds since 1970-01-01T00:00:00Z)
 - TSK_DESCRIPTION (A description of the event)
 
+
 ---
 ## TSK_USER_DEVICE_EVENT
 Activity on the system or from an application.  Example usage is a mobile device being locked and unlocked. 
@@ -689,6 +695,7 @@ Activity on the system or from an application.  Example usage is a mobile device
 - TSK_PROG_NAME (Name of the program doing the activity)
 - TSK_VALUE (Connection type)
 
+
 ---
 ## TSK_WEB_BOOKMARK
 A web bookmark entry.
@@ -703,6 +710,7 @@ A web bookmark entry.
 - TSK_NAME (Name of the bookmark entry)
 - TSK_TITLE (Title of the web page that was bookmarked)
 
+
 ---
 ## TSK_WEB_CACHE
 A web cache entry. The resource that was cached may or may not be present in the data source.
@@ -717,6 +725,7 @@ A web cache entry. The resource that was cached may or may not be present in the
 - TSK_PATH_ID (Object ID of the source cache file)
 - TSK_DOMAIN (Domain of the URL)
 
+
 ---
 ## TSK_WEB_COOKIE
 A Web cookie found.
@@ -734,7 +743,6 @@ A Web cookie found.
 - TSK_PROG_NAME (Name of the application or application extractor that stored the Web cookie)
 
 
-
 ---
 ## TSK_WEB_DOWNLOAD
 A Web download. The downloaded resource may or may not be present in the data source.
@@ -750,7 +758,6 @@ A Web download. The downloaded resource may or may not be present in the data so
 - TSK_PROG_NAME (Name of the application or application extractor that downloaded this resource)
 
 
-
 ---
 ## TSK_WEB_FORM_ADDRESS
 Contains autofill data for a person's address. Form data is usually saved by a Web browser.
@@ -803,7 +810,6 @@ A Web history entry.
 - TSK_DATETIME_CREATED (The datetime the page was created, ie: offline pages)
 
 
-
 ---
 ## TSK_WEB_SEARCH_QUERY
 Details about a Web search query.
@@ -817,7 +823,6 @@ Details about a Web search query.
 - TSK_PROG_NAME (Application or application extractor that stored the Web search query)
 
 
-
 ---
 ## TSK_WIFI_NETWORK
 Details about a WiFi network.
diff --git a/bindings/java/doxygen/blackboard.dox b/bindings/java/doxygen/blackboard.dox
index fc51e88b9699fcfbdcc4d6603340602e1f134876..b6e2bfd96d17aa7f00fd67cce9291011fe467905 100644
--- a/bindings/java/doxygen/blackboard.dox
+++ b/bindings/java/doxygen/blackboard.dox
@@ -6,7 +6,7 @@ The blackboard allows modules (in Autopsy or other frameworks) to communicate an
 
 \subsection jni_bb_concepts Concepts
 
-The blackboard is a collection of <em>artifacts</em>.  Each artifact has a type, such as web browser history, EXIF, or GPS route. The Sleuth Kit has many artifact types already defined (see org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE and the \ref artifact_catalog_page "artifact catalog") and you can also \ref jni_bb_artifact2 "create your own". 
+The blackboard is a collection of <em>artifacts</em>.  Each artifact is either a data artifact or an analysis result. In general, data artifacts record data found in the image (ex: a call log entry) while analysis results are more subjective (ex: a file matching a user-created interesting file set rule). Each artifact has a type, such as web browser history, EXIF, or GPS route. The Sleuth Kit has many artifact types already defined (see org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE and the \ref artifact_catalog_page "artifact catalog") and you can also \ref jni_bb_artifact2 "create your own". 
 
 Each artifact has a set of name-value pairs called <em>attributes</em>.  Attributes also have types, such as URL, created date, or device make. The Sleuth Kit has many attribute types already defined (see org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE) and you can also \ref jni_bb_artifact2 "create your own".  
 
@@ -31,7 +31,7 @@ First you need to decide what type of artifact you are making and what category
 <li>Analysis Result: Result from an analysis technique on a given object with a given configuration. Includes Conclusion, Relevance Score, and Confidence.
 <li>Data Artifact: Data that was originally embedded by an application/OS in a file or other data container.
 </ul>
-Consult the \ref artifact_catalog_page "artifact catalog" for a list of built-in types and what categories they belong to. If you are creating a data artifact, you can optionally add an OS account to it. If you are creating an analysis result, you can optionally add a score and other notes about the result.
+Consult the \ref artifact_catalog_page "artifact catalog" for a list of built-in types and what categories they belong to. If you are creating a data artifact, you can optionally add an OS account to it. If you are creating an analysis result, you can optionally add a score and other notes about the result. Note that you must use the category defined in the artifact catalog for each type or you will get an error. For example, you can't create a web bookmark analysis result. 
 
 There are may ways to create artifacts, but we will focus on creating them through the Blackboard class or directly through a Content object. Regardless of how they are created, all artifacts must be associated with a Content object. 
 
@@ -109,7 +109,11 @@ We achieve this relationship by creating a TSK_ASSOCIATED_OBJECT artifact on the
  
 \subsection jni_bb_query  Querying the Blackboard
 
+You can find artifacts by querying the blackboard in a variety of ways. It is preferable to use the methods that specifically return either data artifacts or analysis results since these will contain the complete information for the artifact. You can use the more general "Artifact" or "BlackboardArtifact" methods to get both, however these results will only contain the blackboard attributes and not any associated OS account or score/justification.
+
 You can find artifacts  using a variety of ways:
+- org.sleuthkit.datamodel.Content.getAllDataArtifacts() to get all data artifacts for a specific Content object.
+- org.sleuthkit.datamodel.Content.getAnalysisResults() to get analysis results of a given type for a specific Content object.
 - org.sleuthkit.datamodel.Content.getArtifacts() in its various forms to get a specific type of artifact for a specific Content object. 
 - org.sleuthkit.datamodel.Content.getGenInfoArtifact() to get the TSK_GEN_INFO artifact for a specific content object.
 - org.sleuthkit.datamodel.SleuthkitCase.getBlackboardArtifacts() in its various forms to get artifacts based on some combination of artifact type, attribute type and value, and content object.
@@ -123,7 +127,7 @@ in the Autopsy UI alongside the built in artifacts and will also appear in the r
 \subsection jni_bb_custom_make Making Custom Artifacts and Attributes
 
 
-org.sleuthkit.datamodel.SleuthkitCase.addBlackboardArtifactType() is used to create a custom artifact.  Give it the display and unique name and it will return a org.sleuthkit.datamodel.BlackboardArtifact.Type object with a unique ID.  You will need to call this once for each case to create the artifact ID.   You can then use this ID to make an artifact of the given type.  To check if the artifact type has already been added to the blackboard or to get the ID after it was created, use org.sleuthkit.datamodel.SleuthkitCase.getArtifactType().
+org.sleuthkit.datamodel.SleuthkitCase.addBlackboardArtifactType() is used to create a custom artifact.  Give it the display name, unique name and category (data artifact or analysis result) and it will return a org.sleuthkit.datamodel.BlackboardArtifact.Type object with a unique ID.  You will need to call this once for each case to create the artifact ID.   You can then use this ID to make an artifact of the given type.  To check if the artifact type has already been added to the blackboard or to get the ID after it was created, use org.sleuthkit.datamodel.SleuthkitCase.getArtifactType().
 
 To create custom attributes, use org.sleuthkit.datamodel.SleuthkitCase.addArtifactAttributeType() to create the artifact type and get its ID. Like artifacts, you must create the attribute type for each new case. To get a type after it has been created in the case, use org.sleuthkit.datamodel.SleuthkitCase.getAttributeType(). Your attribute will be a name-value pair where the value is of the type you specified when creating it. The current types are: String, Integer, Long, Double, Byte, Datetime, and JSON. If you believe you need to create an attribute with type JSON, please read the 
 \ref jni_bb_json_attr_overview "overview" and \ref jni_bb_json_attr "tutorial" sections below. 
diff --git a/bindings/java/doxygen/os_accounts.dox b/bindings/java/doxygen/os_accounts.dox
index dd15fc6026dc94537411f00306047f2a901d6f17..12547cb3e1076341c4aaf359eb9b1251ee508e4f 100644
--- a/bindings/java/doxygen/os_accounts.dox
+++ b/bindings/java/doxygen/os_accounts.dox
@@ -29,6 +29,8 @@ A realm has two core fields:
 - Address that the OS uses internally, such as part of a Windows SID
 - Name that is what users more often see
 
+A local realm also defines the single host that the realm works on. 
+
 When searching for realms, the address has priority over the name. Often times with Windows systems, we may have a realm address from SIDs but not a specific realm name. 
 
 Realms are managed by org.sleuthkit.datamodel.OsAccountRealmManager.
@@ -46,6 +48,16 @@ OS accounts also have other properties, such as full name, creation date, etc.,
 
 OS accounts are managed by org.sleuthkit.datamodel.OsAccountManager.
 
+\subsection os_acct_acct_inst OS Account Instances
+
+An OS Account can exist on multiple systems for several reasons, including:
+- It's a domain account and the user logged into several systems
+- It's a local account, but there was a reference to the account in an event log or registry setting. 
+
+Therefore, the database stores each instance where the account was seen.  An account instance shows that there was a reference to the account on a given host. The instance types are defined in org.sleuthkit.datamodel.OsAccountInstance.OsAccountInstanceType.  
+
+When writing modules, you should record each instance where you see the account. 
+
 \subsection os_acct_acct_os Supported Operating Systems
 
 At this point, APIs exist for only Windows accounts, such as: 
@@ -54,35 +66,35 @@ At this point, APIs exist for only Windows accounts, such as:
 
 The underlying database schema supports other operating systems, but the utility APIs do not exist to populate them other than with Windows SIDs. These methods may be added in the future.
 
-\section os_account_storing Storing Original Account Data
+\section os_account_storing Storing Original Account Data in Other Tables
 
-We recommend that the OS account addresses or names that were parsed from the data source be saved alongside any references to OsAccount objects. For example, the case database stores the UID or SID that was stored in a file system for a file in addition to the reference to the OsAccount object that is associated with that address.  This helps to ensure the original data is preserved in case an Os account can't be created, gets deleted, or is incorrectly merged. 
+We recommend that the OS account addresses or names that were parsed from the data source be saved alongside any references to OsAccount objects when making new tables. For example, the case database stores the UID or SID that was stored in a file system for a file in addition to the reference to the OsAccount object that is associated with that address.  This helps to ensure the original data is preserved in case an OS account can't be created, gets deleted, or is incorrectly merged. 
 
 
 \section os_acct_example Example Creation & Update Code
 
-There are three unique elements to creating and updating OS accounts when adding data to the case database:
+There are three unique things to keep in mind when creating or updating OS accounts:
 
 <ol>
-<li>When creating and updating OS accounts in the case database, you need to avoid some pitfalls involving doing a lot of work in a transaction. Why? For single-user cases, if you have created a org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction, you should never call another database access method unless it allows you to pass in the CaseDbTransaction you are using. Otherwise, the method that you call will attempt to create its own transaction and because you already have the underlying SQLite case database locked, the called method will block forever waiting for a lock it cannot obtain. For a multi-user case, you will run the risk of attempting to create OS accounts in the case database that would duplicate accounts created by another user on another machine. In this scenario, uniqueness constraints will cause your entire transaction to fail and everything you have done up to that point will be rolled back and will have to be redone.
+<li><b>Transactions.</b> To avoid duplicates, OS Accounts are often created and merged outside of a transaction.  With single-user cases, you can get into deadlocks when you mix transaction-based API calls and non-transaction API calls.  
 
-This means that if you want to use a CaseDbTransation to add a lot of files or artifacts associated with OS accounts, you'll need to:
+This means that if you want to use a CaseDbTransation to add a lot of files or artifacts associated with OS accounts at once, you'll need to:
 <ol type="a">
 <li>Pre-process the data to identify what OS accounts you need to create or look up 
 <li>Look up or create the OS accounts in individual transactions 
 <li>Start a new transaction and add the files or artifacts with the references to the OS accounts
 </ol>
 
-<li>You need to check if you have more information than what is already stored (e.g., maybe the realm name was unknown).
+<li><b>Updates.</b> When you come across OS Account data, you may have more info than what already exists in the DB and you should therefore try to update it, e.g., maybe the realm name was unknown. You should call an "update" command just in case you have new data.
 
-<li>You need to record that an OS account was referenced on a given data source because OS accounts are stored in parallel to data sources and are not children of them.
+<li><b>Instances.</b> You need to record that an OS account was referenced on a given data source because OS accounts are stored in parallel to data sources and are not children of them.  Some methods, such as 'addFile()', will automatically record the instance. 
 </ol> 
 
 Here are some examples.
 
 \subsection os_acct_ex_get Adding a File or Data Artifact
 
-If you pass in an OsAccount to the various methods to add files and data artifacts, then the database will make the association and record the occurence. All you need to do is get the account.  You can do that with org.sleuthkit.datamodel.OsAccountManager.getWindowsOsAccount(). Note that sometimes that call will fail if the SID associated with the file is for a group, for example, if the OS account has admin rights. 
+The various addFile() methods allow you to pass in an OsAccount and the database will make the association and record the occurrence. All you need to do is get the account.  You can do that with org.sleuthkit.datamodel.OsAccountManager.getWindowsOsAccount(). Note that sometimes that call will fail if the SID associated with the file is for a group, for example, if the OS account has admin rights. 
 
 If you get an OsAccount, you can try to update it if you think you may have new information. 
 
diff --git a/bindings/java/doxygen/schema/db_schema_9_1.dox b/bindings/java/doxygen/schema/db_schema_9_1.dox
index 0c7a48a203e5d1cea49d99636d7d12a2f9562f0e..cd768dab7fe1d30fd260a8aaa9a68506b183ff5c 100644
--- a/bindings/java/doxygen/schema/db_schema_9_1.dox
+++ b/bindings/java/doxygen/schema/db_schema_9_1.dox
@@ -10,6 +10,8 @@ Each Autopsy release is associated with a schema version with a major and minor
 - If the case database has the same major number as the version of Autopsy being used, the case should generally be able to be opened and used.
 - If the case database has a higher major number than the version of Autopsy being used, an error will be displayed when attempting to open the case. 
 
+You can find more of a description in the org.sleuthkit.datamodel.CaseDbSchemaVersionNumber class description. 
+
 You can find a basic graphic of some of the table relationships <a href="https://docs.google.com/drawings/d/1omR_uUAp1fQt720oJ-kk8C48BXmVa3PNjPZCDdT0Tb4/edit?usp#sharing">here</a>
 
 
diff --git a/bindings/java/ivy.xml b/bindings/java/ivy.xml
index 67518a6566d0eba79da9a9bb86e28dd94ead860e..d6b63338186290a0a839ad3d9883f6db9f654923 100644
--- a/bindings/java/ivy.xml
+++ b/bindings/java/ivy.xml
@@ -2,7 +2,7 @@
 	<info organisation="org.sleuthkit" module="datamodel"/>
 	<dependencies>
 		<dependency org="joda-time" name="joda-time" rev="2.4" />
-		<dependency org="com.google.guava" name="guava" rev="19.0"/>
+		<dependency org="com.google.guava" name="guava" rev="31.1-jre"/>
 		<dependency org="org.apache.commons" name="commons-lang3" rev="3.0"/>
 		<dependency org="commons-validator" name="commons-validator" rev="1.6"/>
 		
@@ -13,12 +13,14 @@
 
         <!-- NOTE: When SQLITE version is changed, also change the version number in
           debian/sleuthkit-java.install so that it gets packaged correctly on Linux -->
-		<dependency org="org.xerial" name="sqlite-jdbc" rev="3.25.2"/>
+		<dependency org="org.xerial" name="sqlite-jdbc" rev="3.42.0.0"/>
 
-		<dependency org="org.postgresql" name="postgresql" rev="42.2.18" >
+		<dependency org="org.postgresql" name="postgresql" rev="42.3.5" >
 			<artifact name="postgresql" type="jar" />
 		</dependency>
-		<dependency org="com.mchange" name="c3p0" rev="0.9.5" />
+		<dependency conf="default" org="com.mchange" name="c3p0" rev="0.9.5.5" />
+		<dependency conf="default" org="com.mchange" name="mchange-commons-java" rev="0.2.20"/>
+		
 		<dependency org="com.zaxxer" name="SparseBitSet" rev="1.1" />
 	</dependencies>
 </ivy-module>
diff --git a/bindings/java/jni/auto_db_java.cpp b/bindings/java/jni/auto_db_java.cpp
index 20e915d81c21412a12f83536ad618421b4cfcc1a..e0e9afb5c461628fb1215d11a3bb40305b611cd6 100644
--- a/bindings/java/jni/auto_db_java.cpp
+++ b/bindings/java/jni/auto_db_java.cpp
@@ -482,7 +482,9 @@ TSK_RETVAL_ENUM TskAutoDbJava::createJString(const char * input, jstring & newJS
 
     if (tsk_UTF8toUTF16((const UTF8 **)&source, (const UTF8 *)&source[input_len], &target, &target[input_len], TSKlenientConversion) != TSKconversionOK) {
         free(utf16_input);
-        return TSK_ERR;
+        // use default JNI method as fallback, fixes https://github.com/sleuthkit/sleuthkit/issues/2723
+        newJString = m_jniEnv->NewStringUTF(input);
+        return TSK_OK;
     }
 
     /*
@@ -1228,21 +1230,15 @@ TskAutoDbJava::addUnallocatedPoolBlocksToDb(size_t & numPool) {
         /* Create the unallocated space files */
         TSK_FS_ATTR_RUN * unalloc_runs = tsk_pool_unallocated_runs(pool_info);
         TSK_FS_ATTR_RUN * current_run = unalloc_runs;
-        vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
         while (current_run != NULL) {
 
-            TSK_DB_FILE_LAYOUT_RANGE tempRange(current_run->addr * pool_info->block_size, current_run->len * pool_info->block_size, 0);
-
-            ranges.push_back(tempRange);
-            int64_t fileObjId = 0;
-            if (TSK_ERR == addUnallocBlockFile(unallocVolObjId, 0, current_run->len * pool_info->block_size, ranges, fileObjId, m_curImgId)) {
+            if (addUnallocBlockFileInChunks(current_run->addr * pool_info->block_size, current_run->len * pool_info->block_size, unallocVolObjId, m_curImgId) == TSK_ERR) {
                 registerError();
                 tsk_fs_attr_run_free(unalloc_runs);
                 return TSK_ERR;
             }
 
             current_run = current_run->next;
-            ranges.clear();
         }
         tsk_fs_attr_run_free(unalloc_runs);
     }
@@ -1918,14 +1914,10 @@ TSK_RETVAL_ENUM TskAutoDbJava::addUnallocVsSpaceToDb(size_t & numVsP) {
             return TSK_ERR;
         }
 
-        // Create an unalloc file with unalloc part, with vs part as parent
-        vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
+        // Create an unalloc file (or files) with unalloc part, with vs part as parent
         const uint64_t byteStart = vsInfo->offset + vsInfo->block_size * vsPart.start;
-        const uint64_t byteLen = vsInfo->block_size * vsPart.len; 
-        TSK_DB_FILE_LAYOUT_RANGE tempRange(byteStart, byteLen, 0);
-        ranges.push_back(tempRange);
-        int64_t fileObjId = 0;
-        if (addUnallocBlockFile(vsPart.objId, 0, tempRange.byteLen, ranges, fileObjId, m_curImgId) == TSK_ERR) {
+        const uint64_t byteLen = vsInfo->block_size * vsPart.len;
+        if (addUnallocBlockFileInChunks(byteStart, byteLen, vsPart.objId, m_curImgId) == TSK_ERR) {
             registerError();
             return TSK_ERR;
         }
@@ -1954,13 +1946,60 @@ TSK_RETVAL_ENUM TskAutoDbJava::addUnallocImageSpaceToDb() {
         vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
         ranges.push_back(tempRange);
         int64_t fileObjId = 0;
-        if (TSK_ERR == addUnallocBlockFile(m_curImgId, 0, imgSize, ranges, fileObjId, m_curImgId)) {
+        if (TSK_ERR == addUnallocBlockFileInChunks(0, imgSize, m_curImgId, m_curImgId)) {
             return TSK_ERR;
         }
     }
     return TSK_OK;
 }
 
+/**
+* Adds unallocated block files to the database, chunking if enabled.
+*
+* @returns TSK_OK on success, TSK_ERR on error
+*/
+TSK_RETVAL_ENUM TskAutoDbJava::addUnallocBlockFileInChunks(uint64_t byteStart, TSK_OFF_T totalSize, int64_t parentObjId, int64_t dataSourceObjId) {
+
+    if (m_maxChunkSize <= 0) {
+        // No chunking - write the entire file
+        TSK_DB_FILE_LAYOUT_RANGE tempRange(byteStart, totalSize, 0);
+        vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
+        ranges.push_back(tempRange);
+        int64_t fileObjId = 0;
+        return addUnallocBlockFile(parentObjId, 0, totalSize, ranges, fileObjId, dataSourceObjId);
+    }
+
+    // We will chunk into separate files with max size m_maxChunkSize
+    uint64_t maxChunkSize = (uint64_t)m_maxChunkSize;
+    uint64_t bytesLeft = (uint64_t)totalSize;
+    uint64_t startingOffset = byteStart;
+    uint64_t chunkSize;
+    vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
+    while (bytesLeft > 0) {
+
+        if (maxChunkSize >= bytesLeft) {
+            chunkSize = bytesLeft;
+            bytesLeft = 0;
+        }
+        else {
+            chunkSize = maxChunkSize;
+            bytesLeft -= maxChunkSize;
+        }
+
+        TSK_DB_FILE_LAYOUT_RANGE tempRange(startingOffset, chunkSize, 0);
+        ranges.push_back(tempRange);
+        int64_t fileObjId = 0;
+
+        TSK_RETVAL_ENUM retval = addUnallocBlockFile(parentObjId, 0, chunkSize, ranges, fileObjId, dataSourceObjId);
+        if (retval != TSK_OK) {
+            return retval;
+        }
+        ranges.clear();
+        startingOffset += chunkSize;
+    }
+    return TSK_OK;
+}
+
 /**
 * Returns the directory currently being analyzed by processFile().
 * Safe to use from another thread than processFile().
diff --git a/bindings/java/jni/auto_db_java.h b/bindings/java/jni/auto_db_java.h
index b324a71c43445a88099b5901d061ce7c5e9a5812..6980a5dcfef1956c146a7259fd125f63af60e4be 100644
--- a/bindings/java/jni/auto_db_java.h
+++ b/bindings/java/jni/auto_db_java.h
@@ -205,6 +205,7 @@ class TskAutoDbJava :public TskAuto {
     TSK_RETVAL_ENUM addUnallocVsSpaceToDb(size_t & numVsP);
     TSK_RETVAL_ENUM addUnallocImageSpaceToDb();
     TSK_RETVAL_ENUM addUnallocSpaceToDb();
+    TSK_RETVAL_ENUM addUnallocBlockFileInChunks(uint64_t byteStart, TSK_OFF_T totalSize, int64_t parentObjId, int64_t dataSourceObjId);
 
     // JNI methods
     TSK_RETVAL_ENUM addImageInfo(int type, TSK_OFF_T ssize, int64_t & objId, const string & timezone, TSK_OFF_T size, const string &md5,
diff --git a/bindings/java/nbproject/project.xml b/bindings/java/nbproject/project.xml
index 2b34d8ff45723f61d2970a892b13cf86a71e8cfb..1a424a7438523663fa127bd79782e840778514eb 100755
--- a/bindings/java/nbproject/project.xml
+++ b/bindings/java/nbproject/project.xml
@@ -114,7 +114,7 @@
         <java-data xmlns="http://www.netbeans.org/ns/freeform-project-java/4">
             <compilation-unit>
                 <package-root>src</package-root>
-                <classpath mode="compile">lib;lib/diffutils-1.2.1.jar;lib/junit-4.12.jar;lib/postgresql-42.2.18.jar;lib/c3p0-0.9.5.jar;lib/mchange-commons-java-0.2.9.jar;lib/c3p0-0.9.5-sources.jar;lib/c3p0-0.9.5-javadoc.jar;lib/joda-time-2.4.jar;lib/commons-lang3-3.0.jar;lib/guava-19.0.jar;lib/SparseBitSet-1.1.jar;lib/gson-2.8.5.jar;lib/commons-validator-1.6.jar</classpath>
+                <classpath mode="compile">lib;lib/diffutils-1.2.1.jar;lib/junit-4.12.jar;lib/postgresql-42.2.18.jar;lib/c3p0-0.9.5.5.jar;lib/mchange-commons-java-0.2.20.jar;lib/joda-time-2.4.jar;lib/commons-lang3-3.0.jar;lib/guava-31.1-jre.jar;lib/SparseBitSet-1.1.jar;lib/gson-2.8.5.jar;lib/commons-validator-1.6.jar</classpath>
                 <built-to>build</built-to>
                 <source-level>1.8</source-level>
             </compilation-unit>
diff --git a/bindings/java/src/org/sleuthkit/datamodel/AbstractContent.java b/bindings/java/src/org/sleuthkit/datamodel/AbstractContent.java
index f16830f626477bd878c5835d658e4985b326f276..41867656eb97fc2e9ba5ada7705be7d81127075c 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/AbstractContent.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/AbstractContent.java
@@ -18,11 +18,13 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.annotations.Beta;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 import java.util.logging.Level;
 import java.util.logging.Logger;
@@ -125,12 +127,11 @@ public Content getParent() throws TskCoreException {
 		// It is possible that multiple threads could be doing this calculation
 		// simultaneously, but it's worth the potential extra processing to prevent deadlocks.
 		if (parent == null) {
-			ObjectInfo parentInfo;
-			parentInfo = db.getParentInfo(this);
-			if (parentInfo == null) {
+			Optional<Long> parentIdOpt = getParentId();
+			if (!parentIdOpt.isPresent()) {
 				parent = null;
 			} else {
-				parent = db.getContentById(parentInfo.getId());
+				parent = db.getContentById(parentIdOpt.get());
 			}
 		}
 		return parent;
@@ -140,6 +141,28 @@ void setParent(Content parent) {
 		this.parent = parent;
 	}
 
+	/**
+	 * Returns the parent object id of the content or empty if no parent can be
+	 * identified.
+	 *
+	 * @return An optional of the parent object id.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public Optional<Long> getParentId() throws TskCoreException {
+		if (parentId == UNKNOWN_ID) {
+			ObjectInfo parentInfo = db.getParentInfo(this);
+			if (parentInfo != null) {
+				parentId = parentInfo.getId();
+			}
+		}
+
+		return parentId == UNKNOWN_ID
+				? Optional.empty()
+				: Optional.of(parentId);
+	}
+
 	/**
 	 * Set the ID of the this AbstractContent's parent
 	 *
@@ -327,7 +350,7 @@ public BlackboardArtifact newArtifact(int artifactTypeID) throws TskCoreExceptio
 		if (artifactTypeID == ARTIFACT_TYPE.TSK_GEN_INFO.getTypeID()) {
 			return getGenInfoArtifact(true);
 		}
-		BlackboardArtifact.Type artifactType = db.getArtifactType(artifactTypeID);
+		BlackboardArtifact.Type artifactType = db.getBlackboard().getArtifactType(artifactTypeID);
 		switch (artifactType.getCategory()) {
 			case DATA_ARTIFACT:
 				return this.newDataArtifact(artifactType, Collections.emptyList());
@@ -375,26 +398,12 @@ public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactTyp
 
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId) throws TskCoreException {
-		DataArtifact artifact = db.getBlackboard().newDataArtifact(artifactType, objId, this.getDataSource().getId(), attributesList, osAccountId);
-
-		if (osAccountId != null) {
-			try (CaseDbConnection connection = db.getConnection()) {
-				db.getOsAccountManager().newOsAccountInstance(osAccountId, getDataSource().getId(), OsAccountInstance.OsAccountInstanceType.LAUNCHED, connection);
-			}
-		}
-		return artifact;
+		return db.getBlackboard().newDataArtifact(artifactType, objId, this.getDataSource().getId(), attributesList, osAccountId);
 	}
 
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId, long dataSourceId) throws TskCoreException {
-		DataArtifact artifact = db.getBlackboard().newDataArtifact(artifactType, objId, dataSourceId, attributesList, osAccountId);
-
-		if (osAccountId != null) {
-			try (CaseDbConnection connection = db.getConnection()) {
-				db.getOsAccountManager().newOsAccountInstance(osAccountId, dataSourceId, OsAccountInstance.OsAccountInstanceType.LAUNCHED, connection);
-			}
-		}
-		return artifact;
+		return db.getBlackboard().newDataArtifact(artifactType, objId, dataSourceId, attributesList, osAccountId);
 	}
 
 	@Override
@@ -411,7 +420,7 @@ public BlackboardArtifact newArtifact(BlackboardArtifact.ARTIFACT_TYPE type) thr
 
 	@Override
 	public ArrayList<BlackboardArtifact> getArtifacts(String artifactTypeName) throws TskCoreException {
-		return getArtifacts(db.getArtifactType(artifactTypeName).getTypeID());
+		return getArtifacts(db.getBlackboard().getArtifactType(artifactTypeName).getTypeID());
 	}
 
 	@Override
diff --git a/bindings/java/src/org/sleuthkit/datamodel/AbstractFile.java b/bindings/java/src/org/sleuthkit/datamodel/AbstractFile.java
index 3ff266e1ebb459efd2a2d523749e46d89e925a30..cf79c881a0a384e37a78eaa1216b250a86619e5a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/AbstractFile.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/AbstractFile.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2021 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,6 +21,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.lang.ref.SoftReference;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.text.MessageFormat;
@@ -37,6 +38,7 @@
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
+import org.sleuthkit.datamodel.TskData.CollectedStatus;
 import org.sleuthkit.datamodel.TskData.FileKnown;
 import org.sleuthkit.datamodel.TskData.TSK_FS_META_FLAG_ENUM;
 import org.sleuthkit.datamodel.TskData.TSK_FS_META_TYPE_ENUM;
@@ -54,6 +56,7 @@ public abstract class AbstractFile extends AbstractContent {
 	protected final TSK_FS_META_TYPE_ENUM metaType;
 	protected TSK_FS_NAME_FLAG_ENUM dirFlag;
 	protected Set<TSK_FS_META_FLAG_ENUM> metaFlags;
+	protected final Long fileSystemObjectId;  // File system object ID; may be null
 	protected long size;
 	protected final long metaAddr, ctime, crtime, atime, mtime;
 	protected final int metaSeq;
@@ -89,6 +92,16 @@ public abstract class AbstractFile extends AbstractContent {
 	 */
 	protected String sha256Hash;
 	private boolean sha256HashDirty = false;
+	
+	/*
+	 * SHA-1 hash
+	 */
+	protected String sha1Hash;
+	private boolean sha1HashDirty = false;
+	
+	private TskData.CollectedStatus collected; // Collected status of file data	
+	private boolean collectedDirty = false;
+	
 	private String mimeType;
 	private boolean mimeTypeDirty = false;
 	private static final Logger LOGGER = Logger.getLogger(AbstractFile.class.getName());
@@ -104,6 +117,11 @@ public abstract class AbstractFile extends AbstractContent {
 	private final Long osAccountObjId; // obj id of the owner's OS account, may be null
 	
 	private volatile String uniquePath;
+	private volatile FileSystem parentFileSystem;
+	
+	private final boolean tryContentProviderStream;
+	private Object contentProviderStreamLock = new Object();
+	private SoftReference<ContentProviderStream> contentProviderStreamRef = null;
 
 	/**
 	 * Initializes common fields used by AbstactFile implementations (objects in
@@ -113,6 +131,7 @@ public abstract class AbstractFile extends AbstractContent {
 	 * @param objId              object id in tsk_objects table
 	 * @param dataSourceObjectId The object id of the root data source of this
 	 *                           file.
+	 * @param fileSystemObjectId The object id of the file system. Can be null (or 0 representing null)
 	 * @param attrType
 	 * @param attrId
 	 * @param name               name field of the file
@@ -131,10 +150,9 @@ public abstract class AbstractFile extends AbstractContent {
 	 * @param modes
 	 * @param uid
 	 * @param gid
-	 * @param md5Hash            md5sum of the file, or null or "NULL" if not
-	 *                           present
-	 * @param sha256Hash         sha256 hash of the file, or null or "NULL" if
-	 *                           not present
+	 * @param md5Hash            md5sum of the file, or null if not present
+	 * @param sha256Hash         sha256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         knownState status of the file, or null if
 	 *                           unknown (default)
 	 * @param parentPath
@@ -143,11 +161,13 @@ public abstract class AbstractFile extends AbstractContent {
 	 *                           including the '.'), can be null.
 	 * @param ownerUid           Owner uid/SID, can be null if not available.
 	 * @param osAccountObjectId	 Object Id of the owner OsAccount, may be null.
+	 * @param collected			 Collected status of file data
 	 *
 	 */
 	AbstractFile(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
+			Long fileSystemObjectId,
 			TskData.TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
 			String name,
 			TskData.TSK_DB_FILES_TYPE_ENUM fileType,
@@ -158,15 +178,28 @@ public abstract class AbstractFile extends AbstractContent {
 			long ctime, long crtime, long atime, long mtime,
 			short modes,
 			int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
 			String parentPath,
 			String mimeType,
 			String extension,
 			String ownerUid,
 			Long osAccountObjectId,
+			TskData.CollectedStatus collected,
 			List<Attribute> fileAttributes) {
 		super(db, objId, name);
 		this.dataSourceObjectId = dataSourceObjectId;
+		if (fileSystemObjectId != null) {
+			// When reading from the result set, nulls are converted to zeros.
+			// Switch it to null.
+			if (fileSystemObjectId > 0) {
+				this.fileSystemObjectId = fileSystemObjectId;
+			} else {
+				this.fileSystemObjectId = null;
+			}
+		} else {
+			this.fileSystemObjectId = null;
+		}
 		this.attrType = attrType;
 		this.attrId = attrId;
 		this.fileType = fileType;
@@ -187,6 +220,7 @@ public abstract class AbstractFile extends AbstractContent {
 
 		this.md5Hash = md5Hash;
 		this.sha256Hash = sha256Hash;
+		this.sha1Hash = sha1Hash;
 		if (knownState == null) {
 			this.knownState = FileKnown.UNKNOWN;
 		} else {
@@ -198,6 +232,10 @@ public abstract class AbstractFile extends AbstractContent {
 		this.encodingType = TskData.EncodingType.NONE;
 		this.ownerUid = ownerUid;
 		this.osAccountObjId = osAccountObjectId;
+		this.collected = collected;
+		// any item that is marked as YES_REPO and there is a custom content provider for the db will attempt to use the content provider to provide data
+		// this will be flipped to false if there is no content provider stream from the content provider for this file
+		this.tryContentProviderStream = collected == CollectedStatus.YES_REPO && db.getContentProvider() != null;
 		if (Objects.nonNull(fileAttributes) && !fileAttributes.isEmpty()) {
 			this.fileAttributesCache.addAll(fileAttributes);
 			loadedAttributesCacheFromDb = true;
@@ -527,6 +565,28 @@ public String getSha256Hash() {
 		return this.sha256Hash;
 	}
 
+	/**
+	 * Sets the SHA-1 hash for this file.
+	 *
+	 * IMPORTANT: The SHA-1 hash is set for this AbstractFile object, but it
+	 * is not saved to the case database until AbstractFile.save is called.
+	 *
+	 * @param sha1Hash The SHA-1 hash of the file.
+	 */
+	public void setSha1Hash(String sha1Hash) {
+		this.sha1Hash = sha1Hash;
+		this.sha1HashDirty = true;
+	}
+
+	/**
+	 * Get the SHA-1 hash value as calculated, if present
+	 *
+	 * @return SHA-1 hash string, if it is present or null if it is not
+	 */
+	public String getSha1Hash() {
+		return this.sha1Hash;
+	}
+	
 	/**
 	 * Gets the attributes of this File
 	 *
@@ -537,7 +597,7 @@ public String getSha256Hash() {
 	public List<Attribute> getAttributes() throws TskCoreException {
 		synchronized (this) {
 			if (!loadedAttributesCacheFromDb) {
-				ArrayList<Attribute> attributes = getSleuthkitCase().getFileAttributes(this);
+				ArrayList<Attribute> attributes = getSleuthkitCase().getBlackboard().getFileAttributes(this);
 				fileAttributesCache.clear();
 				fileAttributesCache.addAll(attributes);
 				loadedAttributesCacheFromDb = true;
@@ -682,6 +742,25 @@ public Content getDataSource() throws TskCoreException {
 	public long getDataSourceObjectId() {
 		return dataSourceObjectId;
 	}
+	
+	/**
+	 * Gets the collected status of the file data.
+	 * 
+	 * @return The collected.
+	 */
+	public TskData.CollectedStatus getCollected() {
+		return collected;
+	}
+	
+	/**
+	 * Sets the collected status of the file data.
+	 * 
+	 * @param collected The file data's collected status
+	 */
+	public void setCollected(TskData.CollectedStatus collected) {
+		this.collected = collected;
+		collectedDirty = true;
+	}
 
 	/**
 	 * Gets file ranges associated with the file. File ranges are objects in
@@ -992,12 +1071,48 @@ void removeMetaFlag(TSK_FS_META_FLAG_ENUM metaFlag) {
 	short getMetaFlagsAsInt() {
 		return TSK_FS_META_FLAG_ENUM.toInt(metaFlags);
 	}
+	
+	/**
+	 * Attempts to get the cached content provider stream for this file,
+	 * loading and caching it if not already present.
+	 *
+	 * NOTE: Does not check the value of tryContentProviderStream before
+	 * attempting.
+	 *
+	 * @return The content stream for this file.
+	 *
+	 * @throws TskCoreException If no content provider stream can be obtained.
+	 */
+	private ContentProviderStream getContentProviderStream() throws TskCoreException {
+		synchronized (contentProviderStreamLock) {
+			// try to get soft reference content provider stream
+			ContentProviderStream contentProviderStream = contentProviderStreamRef == null ? null : contentProviderStreamRef.get();
+			// load if not cached and then cache if present
+			if (contentProviderStream == null) {
+				ContentStreamProvider provider = getSleuthkitCase().getContentProvider();
+				contentProviderStream = provider == null ? null : provider.getContentStream(this).orElse(null);
+
+				if (contentProviderStream == null) {
+					throw new TskCoreException(MessageFormat.format("Could not get content provider string for file with obj id: {0}, path: {1}",
+							getId(),
+							getUniquePath()));
+				}
+
+				this.contentProviderStreamRef = new SoftReference<>(contentProviderStream);
+			}
 
+			return contentProviderStream;
+		}
+	}
+	
 	@Override
 	public final int read(byte[] buf, long offset, long len) throws TskCoreException {
-		//template method
-		//if localPath is set, use local, otherwise, use readCustom() supplied by derived class
-		if (localPathSet) {
+		// try to use content provider stream if should use
+		if (tryContentProviderStream) {
+			ContentProviderStream contentProviderStream = getContentProviderStream();
+			return contentProviderStream.read(buf, offset, len);
+		} else if (localPathSet) {
+			//if localPath is set, use local, otherwise, use readCustom() supplied by derived class
 			return readLocal(buf, offset, len);
 		} else {
 			return readInt(buf, offset, len);
@@ -1163,13 +1278,14 @@ final void setEncodingType(TskData.EncodingType encodingType) {
 	}
 
 	/**
-	 * Check if the file exists. If non-local always true, if local, checks if
-	 * actual local path exists
+	 * Check if the file exists. If non-local or file is marked with YES_REPO
+	 * and there is a content provider always true, if local, checks if actual
+	 * local path exists
 	 *
 	 * @return true if the file exists, false otherwise
 	 */
 	public boolean exists() {
-		if (!localPathSet) {
+		if (tryContentProviderStream || !localPathSet) {
 			return true;
 		} else {
 			try {
@@ -1184,13 +1300,13 @@ public boolean exists() {
 
 	/**
 	 * Check if the file exists and is readable. If non-local (e.g. within an
-	 * image), always true, if local, checks if actual local path exists and is
-	 * readable
+	 * image) or file is marked with YES_REPO and there is a content provider,
+	 * always true, if local, checks if actual local path exists and is readable
 	 *
 	 * @return true if the file is readable
 	 */
 	public boolean canRead() {
-		if (!localPathSet) {
+		if (tryContentProviderStream || !localPathSet) {
 			return true;
 		} else {
 			try {
@@ -1246,6 +1362,7 @@ public void close() {
 
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	protected void finalize() throws Throwable {
 		try {
@@ -1270,7 +1387,7 @@ public String toString(boolean preserveState) {
 				+ "\t" + "metaAddr " + metaAddr + "\t" + "metaSeq " + metaSeq + "\t" + "metaFlags " + metaFlags //NON-NLS
 				+ "\t" + "metaType " + metaType + "\t" + "modes " + modes //NON-NLS
 				+ "\t" + "parentPath " + parentPath + "\t" + "size " + size //NON-NLS
-				+ "\t" + "knownState " + knownState + "\t" + "md5Hash " + md5Hash + "\t" + "sha256Hash " + sha256Hash //NON-NLS
+				+ "\t" + "knownState " + knownState + "\t" + "md5Hash " + md5Hash + "\t" + "sha256Hash " + sha256Hash + "\t" + "sha1Hash " + sha1Hash//NON-NLS
 				+ "\t" + "localPathSet " + localPathSet + "\t" + "localPath " + localPath //NON-NLS
 				+ "\t" + "localAbsPath " + localAbsPath + "\t" + "localFile " + localFile //NON-NLS
 				+ "]\t";
@@ -1336,7 +1453,7 @@ public void save() throws TskCoreException {
 	 *                          properties to the case database.
 	 */
 	public void save(CaseDbTransaction transaction) throws TskCoreException {
-		if (!(md5HashDirty || sha256HashDirty || mimeTypeDirty || knownStateDirty)) {
+		if (!(md5HashDirty || sha256HashDirty || sha1HashDirty || mimeTypeDirty || knownStateDirty || collectedDirty)) {
 			return;
 		}
 
@@ -1356,12 +1473,24 @@ public void save(CaseDbTransaction transaction) throws TskCoreException {
 			}
 			updateSql += "sha256 = '" + this.getSha256Hash() + "'";
 		}
+		if (sha1HashDirty) {
+			if (!updateSql.isEmpty()) {
+				updateSql += ", ";
+			}
+			updateSql += "sha1 = '" + this.getSha1Hash() + "'";
+		}
 		if (knownStateDirty) {
 			if (!updateSql.isEmpty()) {
 				updateSql += ", ";
 			}
 			updateSql += "known = '" + this.getKnown().getFileKnownValue() + "'";
 		}
+		if (collectedDirty) {
+			if (!updateSql.isEmpty()) {
+				updateSql += ", ";
+			}
+			updateSql += "collected = '" + this.getCollected().getType() + "'";
+		}
 		updateSql = "UPDATE tsk_files SET " + updateSql + " WHERE obj_id = " + this.getId();
 
 		SleuthkitCase.CaseDbConnection connection = transaction.getConnection();
@@ -1369,8 +1498,10 @@ public void save(CaseDbTransaction transaction) throws TskCoreException {
 			connection.executeUpdate(statement, updateSql);
 			md5HashDirty = false;
 			sha256HashDirty = false;
+			sha1HashDirty = false;
 			mimeTypeDirty = false;
 			knownStateDirty = false;
+			collectedDirty = false;
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Error updating properties of file %s (obj_id = %s)", getName(), getId()), ex);
 		}
@@ -1397,22 +1528,116 @@ public Optional<Long> getOsAccountObjectId() {
 		return Optional.ofNullable(osAccountObjId);
 	}
 	
+	/**
+	 * Sets the parent file system of this file or directory.
+	 *
+	 * @param parent The parent file system object.
+	 */
+	void setFileSystem(FileSystem parent) {
+		parentFileSystem = parent;
+	}
+	
+	/**
+	 * Get the object id of the parent file system of this file or directory if it exists.
+	 *
+	 * @return The parent file system id.
+	 */
+	public Optional<Long> getFileSystemObjectId() {
+		return Optional.ofNullable(fileSystemObjectId);
+	}
+	
+	/**
+	 * Check if this AbstractFile belongs to a file system.
+	 * 
+	 * @return True if the file belongs to a file system, false otherwise.
+	 */
+	public boolean hasFileSystem() {
+		return fileSystemObjectId != null;
+	}
+	
+	/**
+	 * Gets the parent file system of this file or directory.
+	 * If the AbstractFile object is not FsContent, hasFileSystem() should
+	 * be called before this method to ensure the file belongs to a file
+	 * system.
+	 *
+	 * @return The file system object of the parent.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException If the file does not belong to a file system or
+	 *     another error occurs.
+	 */
+	public FileSystem getFileSystem() throws TskCoreException {
+		if (fileSystemObjectId == null) {
+			throw new TskCoreException("File with ID: " + this.getId() + " does not belong to a file system");
+		}
+		if (parentFileSystem == null) {
+			synchronized (this) {
+				if (parentFileSystem == null) {
+					parentFileSystem = getSleuthkitCase().getFileSystemById(fileSystemObjectId, AbstractContent.UNKNOWN_ID);
+				}
+			}
+		}
+		return parentFileSystem;
+	}
+	
+	/**
+	 * Get the full path to this file or directory, starting with a "/" and the
+	 * data source name and then all the other segments in the path.
+	 *
+	 * @return A unique path for this object.
+	 *
+	 * @throws TskCoreException if there is an error querying the case database.
+	 */
 	@Override
 	public String getUniquePath() throws TskCoreException {
 
 		if (uniquePath == null) {
-			Content dataSource = getDataSource();
-			if (dataSource instanceof LocalFilesDataSource) {
-				if(dataSource != this) {
-					uniquePath = dataSource.getUniquePath() + parentPath + getName();
+			if (getFileSystemObjectId().isPresent()) {
+				// For file system files, construct the path using the path to
+				// the file system, the parent path, and the file name. FileSystem
+				// objects are cached so this is unlikely to perform any
+				// database operations.
+				StringBuilder sb = new StringBuilder();
+				sb.append(getFileSystem().getUniquePath());
+				if (! parentPath.isEmpty()) {
+					sb.append(parentPath);
 				} else {
-					uniquePath =  "/" + getName();
+					// The parent path may not be set in older cases.
+					sb.append("/");
 				}
+				sb.append(getName());
+				uniquePath = sb.toString();
 			} else {
-				uniquePath = super.getUniquePath();
+				if ((this instanceof LayoutFile) && (parentPath.equals("/"))) {
+					// This may be the case where the layout file is a direct child of a 
+					// volume. We want to make sure to include the volume information if present,
+					// so go up the directory structure instead of using the optimized code.
+					uniquePath = super.getUniquePath();
+				} else if (getName().equals(VirtualDirectory.NAME_CARVED) || getName().equals(VirtualDirectory.NAME_UNALLOC) || 
+						parentPath.startsWith("/" + VirtualDirectory.NAME_CARVED) || parentPath.startsWith("/" + VirtualDirectory.NAME_UNALLOC)) {
+					// We can make $Unalloc and $CarvedFiles under volumes without being part of a file system.
+					// As above, we want to make sure to include the volume information if present,
+					// so go up the directory structure instead of using the optimized code.
+					uniquePath = super.getUniquePath();
+				} else {
+					// Optimized code to use for most files. Construct the path
+					// using the data source name, the parent path, and the file name.
+					// DataSource objects are cached so this is unlikely to perform any
+				    // database operations.
+					String dataSourceName = "";
+					Content dataSource = getDataSource();
+					if (dataSource != null) {
+					  dataSourceName = dataSource.getUniquePath(); 
+					}
+					if (! parentPath.isEmpty()) {
+						uniquePath = dataSourceName + parentPath + getName();
+					} else {
+						// The parent path may not be set in older cases.
+						uniquePath = dataSourceName + "/" + getName();
+					}
+				}
 			}
 		}
-
 		return uniquePath;
 	}
 
@@ -1441,92 +1666,6 @@ public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collec
 		return super.newDataArtifact(artifactType, attributesList, getOsAccountObjectId().orElse(null));
 	}
 
-	/**
-	 * Initializes common fields used by AbstactFile implementations (objects in
-	 * tsk_files table)
-	 *
-	 * @param db         case / db handle where this file belongs to
-	 * @param objId      object id in tsk_objects table
-	 * @param attrType
-	 * @param attrId
-	 * @param name       name field of the file
-	 * @param fileType   type of the file
-	 * @param metaAddr
-	 * @param metaSeq
-	 * @param dirType
-	 * @param metaType
-	 * @param dirFlag
-	 * @param metaFlags
-	 * @param size
-	 * @param ctime
-	 * @param crtime
-	 * @param atime
-	 * @param mtime
-	 * @param modes
-	 * @param uid
-	 * @param gid
-	 * @param md5Hash    md5sum of the file, or null or "NULL" if not present
-	 * @param knownState knownState status of the file, or null if unknown
-	 *                   (default)
-	 * @param parentPath
-	 *
-	 * @deprecated Do not make subclasses outside of this package.
-	 */
-	@Deprecated
-	@SuppressWarnings("deprecation")
-	protected AbstractFile(SleuthkitCase db, long objId, TskData.TSK_FS_ATTR_TYPE_ENUM attrType, short attrId,
-			String name, TskData.TSK_DB_FILES_TYPE_ENUM fileType, long metaAddr, int metaSeq,
-			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType, TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
-			long size, long ctime, long crtime, long atime, long mtime, short modes, int uid, int gid, String md5Hash, FileKnown knownState,
-			String parentPath) {
-		this(db, objId, db.getDataSourceObjectId(objId), attrType, (int) attrId, name, fileType, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, null, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
-	}
-
-	/**
-	 * Initializes common fields used by AbstactFile implementations (objects in
-	 * tsk_files table). This deprecated version has attrId filed defined as a
-	 * short which has since been changed to an int.
-	 *
-	 * @param db                 case / db handle where this file belongs to
-	 * @param objId              object id in tsk_objects table
-	 * @param dataSourceObjectId The object id of the root data source of this
-	 *                           file.
-	 * @param attrType
-	 * @param attrId
-	 * @param name               name field of the file
-	 * @param fileType           type of the file
-	 * @param metaAddr
-	 * @param metaSeq
-	 * @param dirType
-	 * @param metaType
-	 * @param dirFlag
-	 * @param metaFlags
-	 * @param size
-	 * @param ctime
-	 * @param crtime
-	 * @param atime
-	 * @param mtime
-	 * @param modes
-	 * @param uid
-	 * @param gid
-	 * @param md5Hash            md5sum of the file, or null or "NULL" if not
-	 *                           present
-	 * @param knownState         knownState status of the file, or null if
-	 *                           unknown (default)
-	 * @param parentPath
-	 * @param mimeType           The MIME type of the file, can be null
-	 *
-	 * @deprecated Do not make subclasses outside of this package.
-	 */
-	@Deprecated
-	@SuppressWarnings("deprecation")
-	AbstractFile(SleuthkitCase db, long objId, long dataSourceObjectId, TskData.TSK_FS_ATTR_TYPE_ENUM attrType, short attrId,
-			String name, TskData.TSK_DB_FILES_TYPE_ENUM fileType, long metaAddr, int metaSeq, TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
-			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size, long ctime, long crtime, long atime, long mtime, short modes,
-			int uid, int gid, String md5Hash, FileKnown knownState, String parentPath, String mimeType) {
-		this(db, objId, dataSourceObjectId, attrType, (int) attrId, name, fileType, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, null, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
-	}
-
 	/**
 	 * Get the attribute id
 	 *
@@ -1612,4 +1751,74 @@ public static String epochToTime(long epoch, TimeZone tzone) {
 	public static long timeToEpoch(String time) {
 		return TimeUtilities.timeToEpoch(time);
 	}
+	
+	/**
+	 * Initializes common fields used by AbstractFile implementations (objects in
+	 * tsk_files table)
+	 *
+	 * @param db                 case / db handle where this file belongs to
+	 * @param objId              object id in tsk_objects table
+	 * @param dataSourceObjectId The object id of the root data source of this
+	 *                           file.
+	 * @param fileSystemObjectId The object id of the file system. Can be null (or 0 representing null)
+	 * @param attrType
+	 * @param attrId
+	 * @param name               name field of the file
+	 * @param fileType           type of the file
+	 * @param metaAddr
+	 * @param metaSeq
+	 * @param dirType
+	 * @param metaType
+	 * @param dirFlag
+	 * @param metaFlags
+	 * @param size
+	 * @param ctime
+	 * @param crtime
+	 * @param atime
+	 * @param mtime
+	 * @param modes
+	 * @param uid
+	 * @param gid
+	 * @param md5Hash            md5sum of the file, or null if not present
+	 * @param sha256Hash         sha256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
+	 * @param knownState         knownState status of the file, or null if
+	 *                           unknown (default)
+	 * @param parentPath
+	 * @param mimeType           The MIME type of the file, can be null.
+	 * @param extension          The extension part of the file name (not
+	 *                           including the '.'), can be null.
+	 * @param ownerUid           Owner uid/SID, can be null if not available.
+	 * @param osAccountObjectId	 Object Id of the owner OsAccount, may be null.
+	 *
+	 * @deprecated
+	 */
+	@Deprecated
+	AbstractFile(SleuthkitCase db,
+			long objId,
+			long dataSourceObjectId,
+			Long fileSystemObjectId,
+			TskData.TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
+			String name,
+			TskData.TSK_DB_FILES_TYPE_ENUM fileType,
+			long metaAddr, int metaSeq,
+			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
+			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
+			long size,
+			long ctime, long crtime, long atime, long mtime,
+			short modes,
+			int uid, int gid,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
+			String parentPath,
+			String mimeType,
+			String extension,
+			String ownerUid,
+			Long osAccountObjectId,
+			List<Attribute> fileAttributes) {
+		this(db, objId, dataSourceObjectId, fileSystemObjectId, attrType, attrId, name, fileType, metaAddr, metaSeq, 
+				dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, 
+				md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, 
+				ownerUid, osAccountObjectId, TskData.CollectedStatus.UNKNOWN, fileAttributes);
+	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Blackboard.java b/bindings/java/src/org/sleuthkit/datamodel/Blackboard.java
old mode 100644
new mode 100755
index 6df67e88659a184ef3d82dc39da5ab9b410e7bc5..1826e40793702e3b59858f4fb321fdbe79b86b5f
--- a/bindings/java/src/org/sleuthkit/datamodel/Blackboard.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Blackboard.java
@@ -18,24 +18,33 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.annotations.Beta;
 import com.google.common.collect.ImmutableSet;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import java.util.stream.Collectors;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
+import static org.sleuthkit.datamodel.SleuthkitCase.closeConnection;
+import static org.sleuthkit.datamodel.SleuthkitCase.closeResultSet;
+import static org.sleuthkit.datamodel.SleuthkitCase.closeStatement;
 
 /**
  * A representation of the blackboard, a place where artifacts and their
@@ -45,6 +54,20 @@ public final class Blackboard {
 
 	private static final Logger LOGGER = Logger.getLogger(Blackboard.class.getName());
 
+	/*
+	 * ConcurrentHashMap semantics are fine for these caches to which entries
+	 * are added, but never removed. There is also no need to keep each pair of
+	 * related caches strictly consistent with each other, because cache misses
+	 * will be extremely rare (standard types are loaded when the case is
+	 * opened), and the cost of a cache miss is low.
+	 */
+	private final Map<Integer, BlackboardArtifact.Type> typeIdToArtifactTypeMap = new ConcurrentHashMap<>();
+	private final Map<Integer, BlackboardAttribute.Type> typeIdToAttributeTypeMap = new ConcurrentHashMap<>();
+	private final Map<String, BlackboardArtifact.Type> typeNameToArtifactTypeMap = new ConcurrentHashMap<>();
+	private final Map<String, BlackboardAttribute.Type> typeNameToAttributeTypeMap = new ConcurrentHashMap<>();
+
+	static final int MIN_USER_DEFINED_TYPE_ID = 10000;
+
 	private final SleuthkitCase caseDb;
 
 	/**
@@ -58,49 +81,88 @@ public final class Blackboard {
 	}
 
 	/**
-	 * Posts the artifact. The artifact should be complete (all attributes have
-	 * been added) before being posted. Posting the artifact includes making any
-	 * timeline events that may be derived from it, and broadcasting a
-	 * notification that the artifact is ready for further analysis.
+	 * Posts an artifact to the blackboard. The artifact should be complete (all
+	 * attributes have been added) before it is posted. Posting the artifact
+	 * triggers the creation of appropriate timeline events, if any, and
+	 * broadcast of a notification that the artifact is ready for further
+	 * analysis.
 	 *
-	 * @param artifact   The artifact to be posted.
-	 * @param moduleName The name of the module that is posting the artifacts.
+	 * @param artifact   The artifact.
+	 * @param moduleName The display name of the module posting the artifact.
 	 *
-	 * @throws BlackboardException If there is a problem posting the artifact.
+	 * @throws BlackboardException The exception is thrown if there is an issue
+	 *                             posting the artifact.
+	 * @deprecated Use postArtifact(BlackboardArtifact artifact, String
+	 * moduleName, Long ingestJobId) instead.
 	 */
+	@Deprecated
 	public void postArtifact(BlackboardArtifact artifact, String moduleName) throws BlackboardException {
-		postArtifacts(Collections.singleton(artifact), moduleName);
+		postArtifacts(Collections.singleton(artifact), moduleName, null);
 	}
 
 	/**
-	 * Posts a Collection of artifacts. The artifacts should be complete (all
-	 * attributes have been added) before being posted. Posting the artifacts
-	 * includes making any events that may be derived from them, and
-	 * broadcasting notifications that the artifacts are ready for further
+	 * Posts a collection of artifacts to the blackboard. The artifacts should
+	 * be complete (all attributes have been added) before they are posted.
+	 * Posting the artifacts triggers the creation of appropriate timeline
+	 * events, if any, and broadcast of a notification that the artifacts are
+	 * ready for further analysis.
+	 *
+	 * @param artifacts  The artifacts.
+	 * @param moduleName The display name of the module posting the artifacts.
+	 *
+	 * @throws BlackboardException The exception is thrown if there is an issue
+	 *                             posting the artifact.
+	 * @deprecated Use postArtifacts(Collection\<BlackboardArtifact\> artifacts,
+	 * String moduleName, Long ingestJobId) instead.
+	 */
+	@Deprecated
+	public void postArtifacts(Collection<BlackboardArtifact> artifacts, String moduleName) throws BlackboardException {
+		postArtifacts(artifacts, moduleName, null);
+	}
+
+	/**
+	 * Posts an artifact to the blackboard. The artifact should be complete (all
+	 * attributes have been added) before it is posted. Posting the artifact
+	 * triggers the creation of appropriate timeline events, if any, and
+	 * broadcast of a notification that the artifact is ready for further
 	 * analysis.
 	 *
+	 * @param artifact    The artifact.
+	 * @param moduleName  The display name of the module posting the artifact.
+	 * @param ingestJobId The numeric identifier of the ingest job for which the
+	 *                    artifact was posted, may be null.
 	 *
-	 * @param artifacts  The artifacts to be posted .
-	 * @param moduleName The name of the module that is posting the artifacts.
-	 *
-	 *
-	 * @throws BlackboardException If there is a problem posting the artifacts.
-	 *
+	 * @throws BlackboardException The exception is thrown if there is an issue
+	 *                             posting the artifact.
 	 */
-	public void postArtifacts(Collection<BlackboardArtifact> artifacts, String moduleName) throws BlackboardException {
-		/*
-		 * For now this just processes them one by one, but in the future it
-		 * could be smarter and use transactions, etc.
-		 */
+	public void postArtifact(BlackboardArtifact artifact, String moduleName, Long ingestJobId) throws BlackboardException {
+		postArtifacts(Collections.singleton(artifact), moduleName, ingestJobId);
+	}
+
+	/**
+	 * Posts a collection of artifacts to the blackboard. The artifacts should
+	 * be complete (all attributes have been added) before they are posted.
+	 * Posting the artifacts triggers the creation of appropriate timeline
+	 * events, if any, and broadcast of a notification that the artifacts are
+	 * ready for further analysis.
+	 *
+	 * @param artifacts   The artifacts.
+	 * @param moduleName  The display name of the module posting the artifacts.
+	 * @param ingestJobId The numeric identifier of the ingest job for which the
+	 *                    artifacts were posted, may be null.
+	 *
+	 * @throws BlackboardException The exception is thrown if there is an issue
+	 *                             posting the artifact.
+	 */
+	public void postArtifacts(Collection<BlackboardArtifact> artifacts, String moduleName, Long ingestJobId) throws BlackboardException {
 		for (BlackboardArtifact artifact : artifacts) {
 			try {
 				caseDb.getTimelineManager().addArtifactEvents(artifact);
 			} catch (TskCoreException ex) {
-				throw new BlackboardException("Failed to add events for artifact: " + artifact, ex);
+				throw new BlackboardException(String.format("Failed to add events to timeline for artifact '%s'", artifact), ex);
 			}
 		}
-
-		caseDb.fireTSKEvent(new ArtifactsPostedEvent(artifacts, moduleName));
+		caseDb.fireTSKEvent(new ArtifactsPostedEvent(artifacts, moduleName, ingestJobId));
 	}
 
 	/**
@@ -118,7 +180,6 @@ public void postArtifacts(Collection<BlackboardArtifact> artifacts, String modul
 	 *                             artifact type.
 	 */
 	public BlackboardArtifact.Type getOrAddArtifactType(String typeName, String displayName) throws BlackboardException {
-
 		return getOrAddArtifactType(typeName, displayName, BlackboardArtifact.Category.DATA_ARTIFACT);
 	}
 
@@ -139,17 +200,592 @@ public BlackboardArtifact.Type getOrAddArtifactType(String typeName, String disp
 		if (category == null) {
 			throw new BlackboardException("Category provided must be non-null");
 		}
-		
+
+		if (typeNameToArtifactTypeMap.containsKey(typeName)) {
+			return typeNameToArtifactTypeMap.get(typeName);
+		}
+
+		Statement s = null;
+		ResultSet rs = null;
+		CaseDbTransaction trans = null;
 		try {
-			return caseDb.addBlackboardArtifactType(typeName, displayName, category);
-		} catch (TskDataException typeExistsEx) {
+			trans = caseDb.beginTransaction();
+
+			CaseDbConnection connection = trans.getConnection();
+			s = connection.createStatement();
+			rs = connection.executeQuery(s, "SELECT artifact_type_id FROM blackboard_artifact_types WHERE type_name = '" + typeName + "'"); //NON-NLS
+			if (!rs.next()) {
+				rs.close();
+				rs = connection.executeQuery(s, "SELECT MAX(artifact_type_id) AS highest_id FROM blackboard_artifact_types");
+				int maxID = 0;
+				if (rs.next()) {
+					maxID = rs.getInt("highest_id");
+					if (maxID < MIN_USER_DEFINED_TYPE_ID) {
+						maxID = MIN_USER_DEFINED_TYPE_ID;
+					} else {
+						maxID++;
+					}
+				}
+				connection.executeUpdate(s, "INSERT INTO blackboard_artifact_types (artifact_type_id, type_name, display_name, category_type) VALUES ('" + maxID + "', '" + typeName + "', '" + displayName + "', " + category.getID() + " )"); //NON-NLS
+				BlackboardArtifact.Type type = new BlackboardArtifact.Type(maxID, typeName, displayName, category);
+				this.typeIdToArtifactTypeMap.put(type.getTypeID(), type);
+				this.typeNameToArtifactTypeMap.put(type.getTypeName(), type);
+				trans.commit();
+				trans = null;
+				return type;
+			} else {
+				trans.commit();
+				trans = null;
+				try {
+					return getArtifactType(typeName);
+				} catch (TskCoreException ex) {
+					throw new BlackboardException("Failed to get or add artifact type: " + typeName, ex);
+				}
+			}
+		} catch (SQLException | TskCoreException ex) {
 			try {
-				return caseDb.getArtifactType(typeName);
-			} catch (TskCoreException ex) {
-				throw new BlackboardException("Failed to get or add artifact type", ex);
+				if (trans != null) {
+					trans.rollback();
+					trans = null;
+				}
+			} catch (TskCoreException ex2) {
+				LOGGER.log(Level.SEVERE, "Error rolling back transaction", ex2);
 			}
-		} catch (TskCoreException ex) {
-			throw new BlackboardException("Failed to get or add artifact type", ex);
+			throw new BlackboardException("Error adding artifact type: " + typeName, ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(s);
+			if (trans != null) {
+				try {
+					trans.rollback();
+				} catch (TskCoreException ex) {
+					throw new BlackboardException("Error rolling back transaction", ex);
+				}
+			}
+		}
+	}
+
+	/**
+	 * Get the attribute type associated with an attribute type name.
+	 *
+	 * @param attrTypeName An attribute type name.
+	 *
+	 * @return An attribute type or null if the attribute type does not exist.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database.
+	 *
+	 */
+	public BlackboardAttribute.Type getAttributeType(String attrTypeName) throws TskCoreException {
+		if (this.typeNameToAttributeTypeMap.containsKey(attrTypeName)) {
+			return this.typeNameToAttributeTypeMap.get(attrTypeName);
+		}
+		CaseDbConnection connection = null;
+		Statement s = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			s = connection.createStatement();
+			rs = connection.executeQuery(s, "SELECT attribute_type_id, type_name, display_name, value_type FROM blackboard_attribute_types WHERE type_name = '" + attrTypeName + "'"); //NON-NLS
+			BlackboardAttribute.Type type = null;
+			if (rs.next()) {
+				type = new BlackboardAttribute.Type(rs.getInt("attribute_type_id"), rs.getString("type_name"),
+						rs.getString("display_name"), BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getLong("value_type")));
+				this.typeIdToAttributeTypeMap.put(type.getTypeID(), type);
+				this.typeNameToAttributeTypeMap.put(attrTypeName, type);
+			}
+			return type;
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting attribute type id", ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(s);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Get the attribute type associated with an attribute type ID.
+	 *
+	 * @param typeID An attribute type ID.
+	 *
+	 * @return An attribute type or null if the attribute type does not exist.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database.
+	 *
+	 */
+	BlackboardAttribute.Type getAttributeType(int typeID) throws TskCoreException {
+		if (this.typeIdToAttributeTypeMap.containsKey(typeID)) {
+			return this.typeIdToAttributeTypeMap.get(typeID);
+		}
+		CaseDbConnection connection = null;
+		Statement s = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			s = connection.createStatement();
+			rs = connection.executeQuery(s, "SELECT attribute_type_id, type_name, display_name, value_type FROM blackboard_attribute_types WHERE attribute_type_id = " + typeID + ""); //NON-NLS
+			BlackboardAttribute.Type type = null;
+			if (rs.next()) {
+				type = new BlackboardAttribute.Type(rs.getInt("attribute_type_id"), rs.getString("type_name"),
+						rs.getString("display_name"), BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getLong("value_type")));
+				this.typeIdToAttributeTypeMap.put(typeID, type);
+				this.typeNameToAttributeTypeMap.put(type.getTypeName(), type);
+			}
+			return type;
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting attribute type id", ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(s);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Get the artifact type associated with an artifact type name.
+	 *
+	 * @param artTypeName An artifact type name.
+	 *
+	 * @return An artifact type or null if the artifact type does not exist.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database.
+	 *
+	 */
+	public BlackboardArtifact.Type getArtifactType(String artTypeName) throws TskCoreException {
+		if (this.typeNameToArtifactTypeMap.containsKey(artTypeName)) {
+			return this.typeNameToArtifactTypeMap.get(artTypeName);
+		}
+		CaseDbConnection connection = null;
+		Statement s = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			s = connection.createStatement();
+			rs = connection.executeQuery(s, "SELECT artifact_type_id, type_name, display_name, category_type FROM blackboard_artifact_types WHERE type_name = '" + artTypeName + "'"); //NON-NLS
+			BlackboardArtifact.Type type = null;
+			if (rs.next()) {
+				type = new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
+						rs.getString("type_name"), rs.getString("display_name"),
+						BlackboardArtifact.Category.fromID(rs.getInt("category_type")));
+				this.typeIdToArtifactTypeMap.put(type.getTypeID(), type);
+				this.typeNameToArtifactTypeMap.put(artTypeName, type);
+			}
+			return type;
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting artifact type from the database", ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(s);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Get the artifact type associated with an artifact type id.
+	 *
+	 * @param artTypeId An artifact type id.
+	 *
+	 * @return The artifact type.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database
+	 *                          or no value is found.
+	 *
+	 */
+	public BlackboardArtifact.Type getArtifactType(int artTypeId) throws TskCoreException {
+		if (this.typeIdToArtifactTypeMap.containsKey(artTypeId)) {
+			return typeIdToArtifactTypeMap.get(artTypeId);
+		}
+		CaseDbConnection connection = null;
+		Statement s = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			s = connection.createStatement();
+			rs = connection.executeQuery(s, "SELECT artifact_type_id, type_name, display_name, category_type FROM blackboard_artifact_types WHERE artifact_type_id = " + artTypeId + ""); //NON-NLS
+			BlackboardArtifact.Type type = null;
+			if (rs.next()) {
+				type = new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
+						rs.getString("type_name"), rs.getString("display_name"),
+						BlackboardArtifact.Category.fromID(rs.getInt("category_type")));
+				this.typeIdToArtifactTypeMap.put(artTypeId, type);
+				this.typeNameToArtifactTypeMap.put(type.getTypeName(), type);
+				return type;
+			} else {
+				throw new TskCoreException("No artifact type found matching id: " + artTypeId);
+			}
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting artifact type from the database", ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(s);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Get the list of attributes for the given artifact.
+	 *
+	 * @param artifact The artifact to load attributes for.
+	 *
+	 * @return The list of attributes.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database.
+	 */
+	public ArrayList<BlackboardAttribute> getBlackboardAttributes(final BlackboardArtifact artifact) throws TskCoreException {
+		CaseDbConnection connection = null;
+		Statement statement = null;
+		ResultSet rs = null;
+		
+		String rowId;
+		switch (caseDb.getDatabaseType()) {
+			case POSTGRESQL: 
+				rowId = "attrs.CTID";
+				break;
+			case SQLITE:
+				rowId = "attrs.ROWID";
+				break;
+			default:
+				throw new TskCoreException("Unknown database type: " + caseDb.getDatabaseType());
+		}
+		
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			statement = connection.createStatement();
+			rs = connection.executeQuery(statement, "SELECT attrs.artifact_id AS artifact_id, "
+					+ "attrs.source AS source, attrs.context AS context, attrs.attribute_type_id AS attribute_type_id, "
+					+ "attrs.value_type AS value_type, attrs.value_byte AS value_byte, "
+					+ "attrs.value_text AS value_text, attrs.value_int32 AS value_int32, "
+					+ "attrs.value_int64 AS value_int64, attrs.value_double AS value_double, "
+					+ "types.type_name AS type_name, types.display_name AS display_name "
+					+ "FROM blackboard_attributes AS attrs, blackboard_attribute_types AS types WHERE attrs.artifact_id = " + artifact.getArtifactID()
+					+ " AND attrs.attribute_type_id = types.attribute_type_id " 
+					+ " ORDER BY " + rowId);
+			ArrayList<BlackboardAttribute> attributes = new ArrayList<>();
+			while (rs.next()) {
+				final BlackboardAttribute attr = createAttributeFromResultSet(rs);
+				attr.setParentDataSourceID(artifact.getDataSourceObjectID());
+				attributes.add(attr);
+			}
+			return attributes;
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting attributes for artifact, artifact id = " + artifact.getArtifactID(), ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(statement);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Populate the attributes for all artifacts in the list. This is done using
+	 * one database call as an efficient way to load many artifacts/attributes
+	 * at once.
+	 *
+	 * @param arts The list of artifacts. When complete, each will have its
+	 *             attributes loaded.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException
+	 */
+	@Beta
+	public <T extends BlackboardArtifact> void loadBlackboardAttributes(List<T> arts) throws TskCoreException {
+
+		if (arts.isEmpty()) {
+			return;
+		}
+
+		// Make a map of artifact ID to artifact
+		Map<Long, BlackboardArtifact> artifactMap = new HashMap<>();
+		for (BlackboardArtifact art : arts) {
+			artifactMap.put(art.getArtifactID(), art);
+		}
+
+		// Make a map of artifact ID to attribute list
+		Map<Long, List<BlackboardAttribute>> attributeMap = new HashMap<>();
+
+		// Get all artifact IDs as a comma-separated string
+		String idString = arts.stream().map(p -> Long.toString(p.getArtifactID())).collect(Collectors.joining(", "));
+
+		String rowId;
+		switch (caseDb.getDatabaseType()) {
+			case POSTGRESQL:
+				rowId = "attrs.CTID";
+				break;
+			case SQLITE:
+				rowId = "attrs.ROWID";
+				break;
+			default:
+				throw new TskCoreException("Unknown database type: " + caseDb.getDatabaseType());
+		}
+
+		// Get the attributes
+		CaseDbConnection connection = null;
+		Statement statement = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			statement = connection.createStatement();
+			rs = connection.executeQuery(statement, "SELECT attrs.artifact_id AS artifact_id, "
+					+ "attrs.source AS source, attrs.context AS context, attrs.attribute_type_id AS attribute_type_id, "
+					+ "attrs.value_type AS value_type, attrs.value_byte AS value_byte, "
+					+ "attrs.value_text AS value_text, attrs.value_int32 AS value_int32, "
+					+ "attrs.value_int64 AS value_int64, attrs.value_double AS value_double, "
+					+ "types.type_name AS type_name, types.display_name AS display_name "
+					+ "FROM blackboard_attributes AS attrs, blackboard_attribute_types AS types WHERE attrs.artifact_id IN (" + idString + ") "
+					+ " AND attrs.attribute_type_id = types.attribute_type_id"
+					+ " ORDER BY " + rowId);
+			while (rs.next()) {
+				final BlackboardAttribute attr = createAttributeFromResultSet(rs);
+				attr.setParentDataSourceID(artifactMap.get(attr.getArtifactID()).getDataSourceObjectID());
+
+				// Collect the list of attributes for each artifact
+				if (!attributeMap.containsKey(attr.getArtifactID())) {
+					attributeMap.put(attr.getArtifactID(), new ArrayList<>());
+				}
+				attributeMap.get(attr.getArtifactID()).add(attr);
+			}
+
+			// Save the attributes to the artifacts
+			for (Long artifactID : attributeMap.keySet()) {
+				artifactMap.get(artifactID).setAttributes(attributeMap.get(artifactID));
+			}
+
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error loading attributes", ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(statement);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Create a BlackboardAttribute artifact from the result set. Does not set
+	 * the data source ID.
+	 *
+	 * @param rs The result set.
+	 *
+	 * @return The corresponding BlackboardAttribute object.
+	 */
+	private BlackboardAttribute createAttributeFromResultSet(ResultSet rs) throws SQLException {
+		int attributeTypeId = rs.getInt("attribute_type_id");
+		String attributeTypeName = rs.getString("type_name");
+		BlackboardAttribute.Type attributeType;
+		if (this.typeIdToAttributeTypeMap.containsKey(attributeTypeId)) {
+			attributeType = this.typeIdToAttributeTypeMap.get(attributeTypeId);
+		} else {
+			attributeType = new BlackboardAttribute.Type(attributeTypeId, attributeTypeName,
+					rs.getString("display_name"),
+					BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getInt("value_type")));
+			this.typeIdToAttributeTypeMap.put(attributeTypeId, attributeType);
+			this.typeNameToAttributeTypeMap.put(attributeTypeName, attributeType);
+		}
+
+		return new BlackboardAttribute(
+				rs.getLong("artifact_id"),
+				attributeType,
+				rs.getString("source"),
+				rs.getString("context"),
+				rs.getInt("value_int32"),
+				rs.getLong("value_int64"),
+				rs.getDouble("value_double"),
+				rs.getString("value_text"),
+				rs.getBytes("value_byte"), caseDb
+		);
+	}
+
+	/**
+	 * Get the attributes associated with the given file.
+	 *
+	 * @param file The file whose attributes should be retrieved.
+	 *
+	 * @return The list of attributes associated with the file, possibly empty.
+	 *
+	 * @throws TskCoreException If an error occurs accessing the case database.
+	 */
+	ArrayList<Attribute> getFileAttributes(final AbstractFile file) throws TskCoreException {
+		CaseDbConnection connection = null;
+		Statement statement = null;
+		ResultSet rs = null;
+		caseDb.acquireSingleUserCaseReadLock();
+		try {
+			connection = caseDb.getConnection();
+			statement = connection.createStatement();
+			rs = connection.executeQuery(statement, "SELECT attrs.id as id,  attrs.obj_id AS obj_id, "
+					+ "attrs.attribute_type_id AS attribute_type_id, "
+					+ "attrs.value_type AS value_type, attrs.value_byte AS value_byte, "
+					+ "attrs.value_text AS value_text, attrs.value_int32 AS value_int32, "
+					+ "attrs.value_int64 AS value_int64, attrs.value_double AS value_double, "
+					+ "types.type_name AS type_name, types.display_name AS display_name "
+					+ "FROM tsk_file_attributes AS attrs "
+					+ " INNER JOIN blackboard_attribute_types AS types "
+					+ " ON attrs.attribute_type_id = types.attribute_type_id "
+					+ " WHERE attrs.obj_id = " + file.getId());
+
+			ArrayList<Attribute> attributes = new ArrayList<Attribute>();
+			while (rs.next()) {
+				int attributeTypeId = rs.getInt("attribute_type_id");
+				String attributeTypeName = rs.getString("type_name");
+				BlackboardAttribute.Type attributeType;
+				if (this.typeIdToAttributeTypeMap.containsKey(attributeTypeId)) {
+					attributeType = this.typeIdToAttributeTypeMap.get(attributeTypeId);
+				} else {
+					attributeType = new BlackboardAttribute.Type(attributeTypeId, attributeTypeName,
+							rs.getString("display_name"),
+							BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getInt("value_type")));
+					this.typeIdToAttributeTypeMap.put(attributeTypeId, attributeType);
+					this.typeNameToAttributeTypeMap.put(attributeTypeName, attributeType);
+				}
+
+				final Attribute attr = new Attribute(
+						rs.getLong("id"),
+						rs.getLong("obj_id"),
+						attributeType,
+						rs.getInt("value_int32"),
+						rs.getLong("value_int64"),
+						rs.getDouble("value_double"),
+						rs.getString("value_text"),
+						rs.getBytes("value_byte"), caseDb
+				);
+				attributes.add(attr);
+			}
+			return attributes;
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error getting attributes for file, file id = " + file.getId(), ex);
+		} finally {
+			closeResultSet(rs);
+			closeStatement(statement);
+			closeConnection(connection);
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
+	/**
+	 * Adds the standard artifact types to the blackboard_artifact_types table
+	 * and the artifact type caches.
+	 *
+	 * @param connection A connection to the case database.
+	 *
+	 * @throws SQLException Thrown if there is an error adding a type to the
+	 *                      table.
+	 */
+	void initBlackboardArtifactTypes(CaseDbConnection connection) throws SQLException {
+		caseDb.acquireSingleUserCaseWriteLock();
+		try (Statement statement = connection.createStatement()) {
+			/*
+			 * Determine which types, if any, have already been added to the
+			 * case database, and load them into the type caches. For a case
+			 * that is being reopened, this should reduce the number of separate
+			 * INSERT statements that will be executed below.
+			 */
+			ResultSet resultSet = connection.executeQuery(statement, "SELECT artifact_type_id, type_name, display_name, category_type FROM blackboard_artifact_types"); //NON-NLS
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = new BlackboardArtifact.Type(resultSet.getInt("artifact_type_id"),
+						resultSet.getString("type_name"), resultSet.getString("display_name"),
+						BlackboardArtifact.Category.fromID(resultSet.getInt("category_type")));
+				typeIdToArtifactTypeMap.put(type.getTypeID(), type);
+				typeNameToArtifactTypeMap.put(type.getTypeName(), type);
+			}
+
+			/*
+			 * INSERT any missing standard types. A conflict clause is used to
+			 * avoid a potential race condition. It also eliminates the need to
+			 * add schema update code when new types are added.
+			 *
+			 * The use here of the soon to be deprecated
+			 * BlackboardArtifact.ARTIFACT_TYPE enum instead of the
+			 * BlackboardArtifact.Type.STANDARD_TYPES collection currently
+			 * ensures that the deprecated types in the former, and not in the
+			 * latter, are added to the case database.
+			 */
+			for (BlackboardArtifact.ARTIFACT_TYPE type : BlackboardArtifact.ARTIFACT_TYPE.values()) {
+				if (typeIdToArtifactTypeMap.containsKey(type.getTypeID())) {
+					continue;
+				}
+				if (caseDb.getDatabaseType() == TskData.DbType.POSTGRESQL) {
+					statement.execute("INSERT INTO blackboard_artifact_types (artifact_type_id, type_name, display_name, category_type) VALUES (" + type.getTypeID() + " , '" + type.getLabel() + "', '" + type.getDisplayName() + "' , " + type.getCategory().getID() + ") ON CONFLICT DO NOTHING"); //NON-NLS
+				} else {
+					statement.execute("INSERT OR IGNORE INTO blackboard_artifact_types (artifact_type_id, type_name, display_name, category_type) VALUES (" + type.getTypeID() + " , '" + type.getLabel() + "', '" + type.getDisplayName() + "' , " + type.getCategory().getID() + ")"); //NON-NLS
+				}
+				typeIdToArtifactTypeMap.put(type.getTypeID(), new BlackboardArtifact.Type(type));
+				typeNameToArtifactTypeMap.put(type.getLabel(), new BlackboardArtifact.Type(type));
+			}
+			if (caseDb.getDatabaseType() == TskData.DbType.POSTGRESQL) {
+				int newPrimaryKeyIndex = Collections.max(Arrays.asList(BlackboardArtifact.ARTIFACT_TYPE.values())).getTypeID() + 1;
+				statement.execute("ALTER SEQUENCE blackboard_artifact_types_artifact_type_id_seq RESTART WITH " + newPrimaryKeyIndex); //NON-NLS
+			}
+		} finally {
+			caseDb.releaseSingleUserCaseWriteLock();
+		}
+	}
+
+	/**
+	 * Adds the standard attribute types to the blackboard_attribute_types table
+	 * and the attribute type caches.
+	 *
+	 * @param connection A connection to the case database.
+	 *
+	 * @throws SQLException Thrown if there is an error adding a type to the
+	 *                      table.
+	 */
+	void initBlackboardAttributeTypes(CaseDbConnection connection) throws SQLException {
+		caseDb.acquireSingleUserCaseWriteLock();
+		try (Statement statement = connection.createStatement()) {
+			/*
+			 * Determine which types, if any, have already been added to the
+			 * case database, and load them into the type caches. For a case
+			 * that is being reopened, this should reduce the number of separate
+			 * INSERT statements that will be executed below.
+			 */
+			ResultSet resultSet = connection.executeQuery(statement, "SELECT attribute_type_id, type_name, display_name, value_type FROM blackboard_attribute_types"); //NON-NLS
+			while (resultSet.next()) {
+				BlackboardAttribute.Type type = new BlackboardAttribute.Type(resultSet.getInt("attribute_type_id"),
+						resultSet.getString("type_name"), resultSet.getString("display_name"),
+						BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(resultSet.getLong("value_type")));
+				typeIdToAttributeTypeMap.put(type.getTypeID(), type);
+				typeNameToAttributeTypeMap.put(type.getTypeName(), type);
+			}
+
+			/*
+			 * INSERT any missing standard types. A conflict clause is used to
+			 * avoid a potential race condition. It also eliminates the need to
+			 * add schema update code when new types are added.
+			 *
+			 * The use here of the soon to be deprecated
+			 * BlackboardAttribute.ATTRIBUTE_TYPE enum instead of the
+			 * BlackboardAttribute.Type.STANDARD_TYPES collection currently
+			 * ensures that the deprecated types in the former, and not in the
+			 * latter, are added to the case database.
+			 */
+			for (BlackboardAttribute.ATTRIBUTE_TYPE type : BlackboardAttribute.ATTRIBUTE_TYPE.values()) {
+				if (typeIdToAttributeTypeMap.containsKey(type.getTypeID())) {
+					continue;
+				}
+				if (caseDb.getDatabaseType() == TskData.DbType.POSTGRESQL) {
+					statement.execute("INSERT INTO blackboard_attribute_types (attribute_type_id, type_name, display_name, value_type) VALUES (" + type.getTypeID() + ", '" + type.getLabel() + "', '" + type.getDisplayName() + "', '" + type.getValueType().getType() + "') ON CONFLICT DO NOTHING"); //NON-NLS
+				} else {
+					statement.execute("INSERT OR IGNORE INTO blackboard_attribute_types (attribute_type_id, type_name, display_name, value_type) VALUES (" + type.getTypeID() + ", '" + type.getLabel() + "', '" + type.getDisplayName() + "', '" + type.getValueType().getType() + "')"); //NON-NLS
+				}
+				typeIdToAttributeTypeMap.put(type.getTypeID(), new BlackboardAttribute.Type(type));
+				typeNameToAttributeTypeMap.put(type.getLabel(), new BlackboardAttribute.Type(type));
+			}
+			if (caseDb.getDatabaseType() == TskData.DbType.POSTGRESQL) {
+				int newPrimaryKeyIndex = Collections.max(Arrays.asList(BlackboardAttribute.ATTRIBUTE_TYPE.values())).getTypeID() + 1;
+				statement.execute("ALTER SEQUENCE blackboard_attribute_types_attribute_type_id_seq RESTART WITH " + newPrimaryKeyIndex); //NON-NLS
+			}
+		} finally {
+			caseDb.releaseSingleUserCaseWriteLock();
 		}
 	}
 
@@ -296,7 +932,7 @@ public Score deleteAnalysisResult(AnalysisResult analysisResult) throws TskCoreE
 	 */
 	public Score deleteAnalysisResult(long artifactObjId, CaseDbTransaction transaction) throws TskCoreException {
 
-		List<AnalysisResult> analysisResults = getAnalysisResultsWhere(" arts.artifact_obj_id = " + artifactObjId, transaction.getConnection());
+		List<AnalysisResult> analysisResults = getAnalysisResultsWhere(" artifacts.artifact_obj_id = " + artifactObjId, transaction.getConnection());
 
 		if (analysisResults.isEmpty()) {
 			throw new TskCoreException(String.format("Analysis Result not found for artifact obj id %d", artifactObjId));
@@ -342,21 +978,29 @@ private Score deleteAnalysisResult(AnalysisResult analysisResult, CaseDbTransact
 		}
 	}
 
-	private final static String ANALYSIS_RESULT_QUERY_STRING = "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-			+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+	private final static String ANALYSIS_RESULT_QUERY_STRING_GENERIC = "SELECT DISTINCT artifacts.artifact_id AS artifact_id, " //NON-NLS
+			+ " artifacts.obj_id AS obj_id, artifacts.artifact_obj_id AS artifact_obj_id, artifacts.data_source_obj_id AS data_source_obj_id, artifacts.artifact_type_id AS artifact_type_id, "
 			+ " types.type_name AS type_name, types.display_name AS display_name, types.category_type as category_type,"//NON-NLS
-			+ " arts.review_status_id AS review_status_id, " //NON-NLS
+			+ " artifacts.review_status_id AS review_status_id, " //NON-NLS
 			+ " results.conclusion AS conclusion,  results.significance AS significance,  results.priority AS priority,  "
 			+ " results.configuration AS configuration,  results.justification AS justification "
-			+ " FROM blackboard_artifacts AS arts "
+			+ " FROM blackboard_artifacts AS artifacts "
 			+ " JOIN blackboard_artifact_types AS types " //NON-NLS
-			+ "		ON arts.artifact_type_id = types.artifact_type_id" //NON-NLS
+			+ "		ON artifacts.artifact_type_id = types.artifact_type_id" //NON-NLS
 			+ " LEFT JOIN tsk_analysis_results AS results "
-			+ "		ON arts.artifact_obj_id = results.artifact_obj_id " //NON-NLS
-			+ " WHERE arts.review_status_id != " + BlackboardArtifact.ReviewStatus.REJECTED.getID() //NON-NLS
+			+ "		ON artifacts.artifact_obj_id = results.artifact_obj_id "; //NON-NLS
+
+	private final static String ANALYSIS_RESULT_QUERY_STRING_WITH_ATTRIBUTES
+			= ANALYSIS_RESULT_QUERY_STRING_GENERIC
+			+ " JOIN blackboard_attributes AS attributes " //NON-NLS 
+			+ " ON artifacts.artifact_id = attributes.artifact_id " //NON-NLS 
+			+ " WHERE types.category_type = " + BlackboardArtifact.Category.ANALYSIS_RESULT.getID(); // NON-NLS
+
+	private final static String ANALYSIS_RESULT_QUERY_STRING_WHERE
+			= ANALYSIS_RESULT_QUERY_STRING_GENERIC
+			+ " WHERE artifacts.review_status_id != " + BlackboardArtifact.ReviewStatus.REJECTED.getID() //NON-NLS
 			+ "     AND types.category_type = " + BlackboardArtifact.Category.ANALYSIS_RESULT.getID(); // NON-NLS
 
-	
 	/**
 	 * Get all analysis results of given artifact type.
 	 *
@@ -368,25 +1012,50 @@ private Score deleteAnalysisResult(AnalysisResult analysisResult, CaseDbTransact
 	 *                          within TSK core.
 	 */
 	public List<AnalysisResult> getAnalysisResultsByType(int artifactTypeId) throws TskCoreException {
-		return getAnalysisResultsWhere(" arts.artifact_type_id = " + artifactTypeId);
+		return getAnalysisResultsWhere(" artifacts.artifact_type_id = " + artifactTypeId);
 	}
 
 	/**
 	 * Get all analysis results of given artifact type.
 	 *
-	 * @param artifactTypeId The artifact type id for which to search.
+	 * @param artifactTypeId  The artifact type id for which to search.
 	 * @param dataSourceObjId Object Id of the data source to look under.
-	 * 
+	 *
 	 * @return The list of analysis results.
 	 *
 	 * @throws TskCoreException Exception thrown if a critical error occurs
 	 *                          within TSK core.
 	 */
 	public List<AnalysisResult> getAnalysisResultsByType(int artifactTypeId, long dataSourceObjId) throws TskCoreException {
-		return getAnalysisResultsWhere(" arts.artifact_type_id = " + artifactTypeId + " AND arts.data_source_obj_id = " + dataSourceObjId);
+		return getAnalysisResultsWhere(" artifacts.artifact_type_id = " + artifactTypeId + " AND artifacts.data_source_obj_id = " + dataSourceObjId);
+	}
+
+	/**
+	 * Gets all analysis results of a given type for a given data source. To get
+	 * all the analysis results for the data source, pass null for the type ID.
+	 *
+	 * @param dataSourceObjId The object ID of the data source.
+	 * @param artifactTypeID  The type ID of the desired analysis results or
+	 *                        null.
+	 *
+	 * @return A list of the analysis results, possibly empty.
+	 *
+	 * @throws TskCoreException This exception is thrown if there is an error
+	 *                          querying the case database.
+	 */
+	public List<AnalysisResult> getAnalysisResults(long dataSourceObjId, Integer artifactTypeID) throws TskCoreException {
+		caseDb.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = caseDb.getConnection()) {
+			String whereClause = " artifacts.data_source_obj_id = " + dataSourceObjId;
+			if (artifactTypeID != null) {
+				whereClause += " AND artifacts.artifact_type_id = " + artifactTypeID;
+			}
+			return getAnalysisResultsWhere(whereClause, connection);
+		} finally {
+			caseDb.releaseSingleUserCaseReadLock();
+		}
 	}
 
-	
 	/**
 	 * Get all analysis results for a given object.
 	 *
@@ -398,10 +1067,9 @@ public List<AnalysisResult> getAnalysisResultsByType(int artifactTypeId, long da
 	 *                          within TSK core.
 	 */
 	public List<AnalysisResult> getAnalysisResults(long sourceObjId) throws TskCoreException {
-		return getAnalysisResultsWhere(" arts.obj_id = " + sourceObjId);
+		return getAnalysisResultsWhere(" artifacts.obj_id = " + sourceObjId);
 	}
-	
-	
+
 	/**
 	 * Get all data artifacts for a given object.
 	 *
@@ -415,48 +1083,58 @@ public List<AnalysisResult> getAnalysisResults(long sourceObjId) throws TskCoreE
 	List<DataArtifact> getDataArtifactsBySource(long sourceObjId) throws TskCoreException {
 		caseDb.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = caseDb.getConnection()) {
-			return getDataArtifactsWhere(String.format(" artifacts.obj_id = " + sourceObjId), connection);
+			return getDataArtifactsWhere(String.format(" artifacts.obj_id = %d", sourceObjId), connection);
 		} finally {
 			caseDb.releaseSingleUserCaseReadLock();
 		}
 	}
-	
-	
+
 	/**
 	 * Returns true if there are data artifacts belonging to the sourceObjId.
+	 *
 	 * @param sourceObjId The source content object id.
+	 *
 	 * @return True if there are data artifacts belonging to this source obj id.
-	 * @throws TskCoreException 
+	 *
+	 * @throws TskCoreException
 	 */
 	public boolean hasDataArtifacts(long sourceObjId) throws TskCoreException {
 		return hasArtifactsOfCategory(BlackboardArtifact.Category.DATA_ARTIFACT, sourceObjId);
 	}
-	
+
 	/**
 	 * Returns true if there are analysis results belonging to the sourceObjId.
+	 *
 	 * @param sourceObjId The source content object id.
-	 * @return True if there are analysis results belonging to this source obj id.
-	 * @throws TskCoreException 
+	 *
+	 * @return True if there are analysis results belonging to this source obj
+	 *         id.
+	 *
+	 * @throws TskCoreException
 	 */
 	public boolean hasAnalysisResults(long sourceObjId) throws TskCoreException {
 		return hasArtifactsOfCategory(BlackboardArtifact.Category.ANALYSIS_RESULT, sourceObjId);
 	}
-	
-	
+
 	/**
-	 * Returns true if there are artifacts of the given category belonging to the sourceObjId.
-	 * @param category The category of the artifacts.
+	 * Returns true if there are artifacts of the given category belonging to
+	 * the sourceObjId.
+	 *
+	 * @param category    The category of the artifacts.
 	 * @param sourceObjId The source content object id.
-	 * @return True if there are artifacts of the given category belonging to this source obj id.
-	 * @throws TskCoreException 
+	 *
+	 * @return True if there are artifacts of the given category belonging to
+	 *         this source obj id.
+	 *
+	 * @throws TskCoreException
 	 */
 	private boolean hasArtifactsOfCategory(BlackboardArtifact.Category category, long sourceObjId) throws TskCoreException {
 		String queryString = "SELECT COUNT(*) AS count " //NON-NLS
-			+ " FROM blackboard_artifacts AS arts "
-			+ " JOIN blackboard_artifact_types AS types " //NON-NLS
-			+ "		ON arts.artifact_type_id = types.artifact_type_id" //NON-NLS
-			+ " WHERE types.category_type = " + category.getID()
-			+ " AND arts.obj_id = " + sourceObjId;
+				+ " FROM blackboard_artifacts AS arts "
+				+ " JOIN blackboard_artifact_types AS types " //NON-NLS
+				+ "		ON arts.artifact_type_id = types.artifact_type_id" //NON-NLS
+				+ " WHERE types.category_type = " + category.getID()
+				+ " AND arts.obj_id = " + sourceObjId;
 
 		caseDb.acquireSingleUserCaseReadLock();
 		try (SleuthkitCase.CaseDbConnection connection = caseDb.getConnection();
@@ -473,9 +1151,6 @@ private boolean hasArtifactsOfCategory(BlackboardArtifact.Category category, lon
 		}
 	}
 
-
-	
-	
 	/**
 	 * Get all analysis results for a given object.
 	 *
@@ -489,7 +1164,7 @@ private boolean hasArtifactsOfCategory(BlackboardArtifact.Category category, lon
 	 *                          within TSK core.
 	 */
 	List<AnalysisResult> getAnalysisResults(long sourceObjId, CaseDbConnection connection) throws TskCoreException {
-		return getAnalysisResultsWhere(" arts.obj_id = " + sourceObjId, connection);
+		return getAnalysisResultsWhere(" artifacts.obj_id = " + sourceObjId, connection);
 	}
 
 	/**
@@ -505,13 +1180,13 @@ List<AnalysisResult> getAnalysisResults(long sourceObjId, CaseDbConnection conne
 	 */
 	public List<AnalysisResult> getAnalysisResults(long sourceObjId, int artifactTypeId) throws TskCoreException {
 		// Get the artifact type to check that it in the analysis result category.
-		BlackboardArtifact.Type artifactType = caseDb.getArtifactType(artifactTypeId);
+		BlackboardArtifact.Type artifactType = getArtifactType(artifactTypeId);
 		if (artifactType.getCategory() != BlackboardArtifact.Category.ANALYSIS_RESULT) {
 			throw new TskCoreException(String.format("Artifact type id %d is not in analysis result catgeory.", artifactTypeId));
 		}
 
 		String whereClause = " types.artifact_type_id = " + artifactTypeId
-				+ " AND arts.obj_id = " + sourceObjId;
+				+ " AND artifacts.obj_id = " + sourceObjId;
 		return getAnalysisResultsWhere(whereClause);
 	}
 
@@ -549,7 +1224,7 @@ public List<AnalysisResult> getAnalysisResultsWhere(String whereClause) throws T
 	 */
 	List<AnalysisResult> getAnalysisResultsWhere(String whereClause, CaseDbConnection connection) throws TskCoreException {
 
-		final String queryString = ANALYSIS_RESULT_QUERY_STRING
+		final String queryString = ANALYSIS_RESULT_QUERY_STRING_WHERE
 				+ " AND " + whereClause;
 
 		try (Statement statement = connection.createStatement();
@@ -573,7 +1248,7 @@ List<AnalysisResult> getAnalysisResultsWhere(String whereClause, CaseDbConnectio
 	 */
 	public AnalysisResult getAnalysisResultById(long artifactObjId) throws TskCoreException {
 
-		String whereClause = " arts.artifact_obj_id = " + artifactObjId;
+		String whereClause = " artifacts.artifact_obj_id = " + artifactObjId;
 		List<AnalysisResult> results = getAnalysisResultsWhere(whereClause);
 
 		if (results.isEmpty()) { // throw an error if no analysis result found by id.
@@ -617,19 +1292,53 @@ private List<AnalysisResult> resultSetToAnalysisResults(ResultSet resultSet) thr
 		return analysisResults;
 	}
 
-	private final static String DATA_ARTIFACT_QUERY_STRING = "SELECT DISTINCT artifacts.artifact_id AS artifact_id, " //NON-NLS
+	private final static String DATA_ARTIFACT_QUERY_STRING_GENERIC = "SELECT DISTINCT artifacts.artifact_id AS artifact_id, " //NON-NLS
 			+ "artifacts.obj_id AS obj_id, artifacts.artifact_obj_id AS artifact_obj_id, artifacts.data_source_obj_id AS data_source_obj_id, artifacts.artifact_type_id AS artifact_type_id, " //NON-NLS
 			+ " types.type_name AS type_name, types.display_name AS display_name, types.category_type as category_type,"//NON-NLS
 			+ " artifacts.review_status_id AS review_status_id, " //NON-NLS
 			+ " data_artifacts.os_account_obj_id as os_account_obj_id " //NON-NLS
-			+ " FROM blackboard_artifacts AS artifacts "
+			+ " FROM blackboard_artifacts AS artifacts " //NON-NLS 
 			+ " JOIN blackboard_artifact_types AS types " //NON-NLS
 			+ "		ON artifacts.artifact_type_id = types.artifact_type_id" //NON-NLS
-			+ " LEFT JOIN tsk_data_artifacts AS data_artifacts "
-			+ "		ON artifacts.artifact_obj_id = data_artifacts.artifact_obj_id " //NON-NLS
+			+ " LEFT JOIN tsk_data_artifacts AS data_artifacts " //NON-NLS 
+			+ "		ON artifacts.artifact_obj_id = data_artifacts.artifact_obj_id "; //NON-NLS
+
+	private final static String DATA_ARTIFACT_QUERY_STRING_WITH_ATTRIBUTES
+			= DATA_ARTIFACT_QUERY_STRING_GENERIC
+			+ " JOIN blackboard_attributes AS attributes " //NON-NLS 
+			+ " ON artifacts.artifact_id = attributes.artifact_id " //NON-NLS 
+			+ " WHERE types.category_type = " + BlackboardArtifact.Category.DATA_ARTIFACT.getID(); // NON-NLS	
+
+	private final static String DATA_ARTIFACT_QUERY_STRING_WHERE
+			= DATA_ARTIFACT_QUERY_STRING_GENERIC
 			+ " WHERE artifacts.review_status_id != " + BlackboardArtifact.ReviewStatus.REJECTED.getID() //NON-NLS
 			+ "     AND types.category_type = " + BlackboardArtifact.Category.DATA_ARTIFACT.getID(); // NON-NLS
 
+	/**
+	 * Gets all data artifacts of a given type for a given data source. To get
+	 * all the data artifacts for the data source, pass null for the type ID.
+	 *
+	 * @param dataSourceObjId The object ID of the data source.
+	 * @param artifactTypeID  The type ID of the desired artifacts or null.
+	 *
+	 * @return A list of the data artifacts, possibly empty.
+	 *
+	 * @throws TskCoreException This exception is thrown if there is an error
+	 *                          querying the case database.
+	 */
+	public List<DataArtifact> getDataArtifacts(long dataSourceObjId, Integer artifactTypeID) throws TskCoreException {
+		caseDb.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = caseDb.getConnection()) {
+			String whereClause = " artifacts.data_source_obj_id = " + dataSourceObjId;
+			if (artifactTypeID != null) {
+				whereClause += " AND artifacts.artifact_type_id = " + artifactTypeID;
+			}
+			return getDataArtifactsWhere(whereClause, connection);
+		} finally {
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
 	/**
 	 * Get all data artifacts of a given type for a given data source.
 	 *
@@ -644,7 +1353,7 @@ private List<AnalysisResult> resultSetToAnalysisResults(ResultSet resultSet) thr
 	public List<DataArtifact> getDataArtifacts(int artifactTypeID, long dataSourceObjId) throws TskCoreException {
 
 		// Get the artifact type to check that it in the data artifact category.
-		BlackboardArtifact.Type artifactType = caseDb.getArtifactType(artifactTypeID);
+		BlackboardArtifact.Type artifactType = getArtifactType(artifactTypeID);
 		if (artifactType.getCategory() != BlackboardArtifact.Category.DATA_ARTIFACT) {
 			throw new TskCoreException(String.format("Artifact type id %d is not in data artifact catgeory.", artifactTypeID));
 		}
@@ -672,7 +1381,7 @@ public List<DataArtifact> getDataArtifacts(int artifactTypeID, long dataSourceOb
 	 */
 	public List<DataArtifact> getDataArtifacts(int artifactTypeID) throws TskCoreException {
 		// Get the artifact type to check that it in the data artifact category.
-		BlackboardArtifact.Type artifactType = caseDb.getArtifactType(artifactTypeID);
+		BlackboardArtifact.Type artifactType = getArtifactType(artifactTypeID);
 		if (artifactType.getCategory() != BlackboardArtifact.Category.DATA_ARTIFACT) {
 			throw new TskCoreException(String.format("Artifact type id %d is not in data artifact catgeory.", artifactTypeID));
 		}
@@ -716,6 +1425,25 @@ public DataArtifact getDataArtifactById(long artifactObjId) throws TskCoreExcept
 		}
 	}
 
+	/**
+	 * Get all data artifacts matching the given where sub-clause.
+	 *
+	 * @param whereClause SQL WHERE sub-clause, specifies conditions to match.
+	 *
+	 * @return List of data artifacts. May be an empty list.
+	 *
+	 * @throws TskCoreException exception thrown if a critical error occurs
+	 *                          within TSK core.
+	 */
+	public List<DataArtifact> getDataArtifactsWhere(String whereClause) throws TskCoreException {
+		caseDb.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = caseDb.getConnection()) {
+			return getDataArtifactsWhere(whereClause, connection);
+		} finally {
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+	}
+
 	/**
 	 * Get all data artifacts matching the given where sub-clause. Uses the
 	 * given database connection to execute the query.
@@ -728,15 +1456,15 @@ public DataArtifact getDataArtifactById(long artifactObjId) throws TskCoreExcept
 	 * @throws TskCoreException exception thrown if a critical error occurs
 	 *                          within TSK core.
 	 */
-	private List<DataArtifact> getDataArtifactsWhere(String whereClause, CaseDbConnection connection) throws TskCoreException {
+	List<DataArtifact> getDataArtifactsWhere(String whereClause, CaseDbConnection connection) throws TskCoreException {
 
-		final String queryString = DATA_ARTIFACT_QUERY_STRING
-				+ " AND ( " + whereClause + " )";
+		final String queryString = DATA_ARTIFACT_QUERY_STRING_WHERE
+				+ " AND " + whereClause + " ";
 
 		try (Statement statement = connection.createStatement();
 				ResultSet resultSet = connection.executeQuery(statement, queryString);) {
 
-			List<DataArtifact> dataArtifacts = resultSetToDataArtifacts(resultSet, connection);
+			List<DataArtifact> dataArtifacts = resultSetToDataArtifacts(resultSet);
 			return dataArtifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Error getting data artifacts with queryString = %s", queryString), ex);
@@ -747,10 +1475,9 @@ private List<DataArtifact> getDataArtifactsWhere(String whereClause, CaseDbConne
 	 * Creates DataArtifacts objects for the resultset of a table query of the
 	 * form "SELECT * FROM blackboard_artifacts JOIN data_artifacts WHERE ...".
 	 *
-	 * @param resultSet  A result set from a query of the blackboard_artifacts
-	 *                   table of the form "SELECT * FROM blackboard_artifacts,
-	 *                   tsk_data_artifacts WHERE ...".
-	 * @param connection Database connection.
+	 * @param resultSet A result set from a query of the blackboard_artifacts
+	 *                  table of the form "SELECT * FROM blackboard_artifacts,
+	 *                  tsk_data_artifacts WHERE ...".
 	 *
 	 * @return A list of DataArtifact objects.
 	 *
@@ -759,7 +1486,7 @@ private List<DataArtifact> getDataArtifactsWhere(String whereClause, CaseDbConne
 	 * @throws TskCoreException Thrown if there is an error looking up the
 	 *                          artifact type id.
 	 */
-	private List<DataArtifact> resultSetToDataArtifacts(ResultSet resultSet, CaseDbConnection connection) throws SQLException, TskCoreException {
+	private List<DataArtifact> resultSetToDataArtifacts(ResultSet resultSet) throws SQLException, TskCoreException {
 		ArrayList<DataArtifact> dataArtifacts = new ArrayList<>();
 
 		while (resultSet.next()) {
@@ -779,25 +1506,14 @@ private List<DataArtifact> resultSetToDataArtifacts(ResultSet resultSet, CaseDbC
 		return dataArtifacts;
 	}
 
-	/**
-	 * Get the artifact type associated with an artifact type id.
-	 *
-	 * @param artTypeId An artifact type id.
-	 *
-	 * @return The artifact type.
-	 *
-	 * @throws TskCoreException If an error occurs accessing the case database 
-	 *						    or no value is found.
-	 *
-	 */
-	public BlackboardArtifact.Type getArtifactType(int artTypeId) throws TskCoreException {
-		return caseDb.getArtifactType(artTypeId);
-	}
-	
 	/**
 	 * Gets an attribute type, creating it if it does not already exist. Use
 	 * this method to define custom attribute types.
 	 *
+	 * NOTE: This method is synchronized to prevent simultaneous access from
+	 * different threads, but there is still the possibility of concurrency
+	 * issues from different clients.
+	 *
 	 * @param typeName    The type name of the attribute type.
 	 * @param valueType   The value type of the attribute type.
 	 * @param displayName The display name of the attribute type.
@@ -807,18 +1523,89 @@ public BlackboardArtifact.Type getArtifactType(int artTypeId) throws TskCoreExce
 	 * @throws BlackboardException If there is a problem getting or adding the
 	 *                             attribute type.
 	 */
-	public BlackboardAttribute.Type getOrAddAttributeType(String typeName, BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE valueType, String displayName) throws BlackboardException {
+	public synchronized BlackboardAttribute.Type getOrAddAttributeType(String typeName, BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE valueType, String displayName) throws BlackboardException {
+		// check local cache
+		if (typeNameToAttributeTypeMap.containsKey(typeName)) {
+			return typeNameToAttributeTypeMap.get(typeName);
+		}
 
+		CaseDbTransaction trans = null;
 		try {
-			return caseDb.addArtifactAttributeType(typeName, valueType, displayName);
-		} catch (TskDataException typeExistsEx) {
+			trans = this.caseDb.beginTransaction();
+			String matchingAttrQuery = "SELECT attribute_type_id, type_name, display_name, value_type "
+					+ "FROM blackboard_attribute_types WHERE type_name = ?";
+			// find matching attribute name
+			PreparedStatement query = trans.getConnection().getPreparedStatement(matchingAttrQuery, Statement.RETURN_GENERATED_KEYS);
+			query.clearParameters();
+			query.setString(1, typeName);
+			try (ResultSet rs = query.executeQuery()) {
+				// if previously existing, commit the results and return the attribute type
+				if (rs.next()) {
+					trans.commit();
+					trans = null;
+					BlackboardAttribute.Type foundType = new BlackboardAttribute.Type(
+							rs.getInt("attribute_type_id"),
+							rs.getString("type_name"),
+							rs.getString("display_name"),
+							BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getLong("value_type"))
+					);
+
+					this.typeIdToAttributeTypeMap.put(foundType.getTypeID(), foundType);
+					this.typeNameToAttributeTypeMap.put(foundType.getTypeName(), foundType);
+
+					return foundType;
+				}
+			}
+
+			// if not found in database, insert
+			String insertStatement = "INSERT INTO blackboard_attribute_types (attribute_type_id, type_name, display_name, value_type) VALUES (\n"
+					// get the maximum of the attribute type id's or the min user defined type id and add 1 to it for the new id
+					+ "(SELECT MAX(q.attribute_type_id) FROM (SELECT attribute_type_id FROM blackboard_attribute_types UNION SELECT " + (MIN_USER_DEFINED_TYPE_ID - 1) + ") q) + 1,\n"
+					// typeName, displayName, valueType
+					+ "?, ?, ?)";
+
+			PreparedStatement insertPreparedStatement = trans.getConnection().getPreparedStatement(insertStatement, Statement.RETURN_GENERATED_KEYS);
+			insertPreparedStatement.clearParameters();
+			insertPreparedStatement.setString(1, typeName);
+			insertPreparedStatement.setString(2, displayName);
+			insertPreparedStatement.setLong(3, valueType.getType());
+
+			int numUpdated = insertPreparedStatement.executeUpdate();
+
+			// get id for inserted to create new attribute.
+			Integer attrId = null;
+
+			if (numUpdated > 0) {
+				try (ResultSet insertResult = insertPreparedStatement.getGeneratedKeys()) {
+					if (insertResult.next()) {
+						attrId = insertResult.getInt(1);
+					}
+				}
+			}
+
+			if (attrId == null) {
+				throw new BlackboardException(MessageFormat.format(
+						"Error adding attribute type.  Item with name {0} was not inserted successfully into the database.", typeName));
+			}
+
+			trans.commit();
+			trans = null;
+
+			BlackboardAttribute.Type type = new BlackboardAttribute.Type(attrId, typeName, displayName, valueType);
+			this.typeIdToAttributeTypeMap.put(type.getTypeID(), type);
+			this.typeNameToAttributeTypeMap.put(type.getTypeName(), type);
+			return type;
+		} catch (SQLException | TskCoreException ex) {
+			throw new BlackboardException("Error adding attribute type: " + typeName, ex);
+		} finally {
 			try {
-				return caseDb.getAttributeType(typeName);
-			} catch (TskCoreException ex) {
-				throw new BlackboardException("Failed to get or add attribute type", ex);
+				if (trans != null) {
+					trans.rollback();
+					trans = null;
+				}
+			} catch (TskCoreException ex2) {
+				LOGGER.log(Level.SEVERE, "Error rolling back transaction", ex2);
 			}
-		} catch (TskCoreException ex) {
-			throw new BlackboardException("Failed to get or add attribute type", ex);
 		}
 	}
 
@@ -852,7 +1639,7 @@ public List<BlackboardArtifact.Type> getArtifactTypesInUse(long dataSourceObjId)
 			List<BlackboardArtifact.Type> uniqueArtifactTypes = new ArrayList<>();
 			while (resultSet.next()) {
 				uniqueArtifactTypes.add(new BlackboardArtifact.Type(resultSet.getInt("artifact_type_id"),
-						resultSet.getString("type_name"), resultSet.getString("display_name"), 
+						resultSet.getString("type_name"), resultSet.getString("display_name"),
 						BlackboardArtifact.Category.fromID(resultSet.getInt("category_type"))));
 			}
 			return uniqueArtifactTypes;
@@ -880,6 +1667,21 @@ public long getArtifactsCount(int artifactTypeID, long dataSourceObjId) throws T
 				"blackboard_artifacts.data_source_obj_id = '" + dataSourceObjId + "';");
 	}
 
+	/**
+	 * Get count of all blackboard artifacts of a given type. Does not include
+	 * rejected artifacts.
+	 *
+	 * @param artifactTypeID artifact type id (must exist in database)
+	 *
+	 * @return count of blackboard artifacts
+	 *
+	 * @throws TskCoreException exception thrown if a critical error occurs
+	 *                          within TSK core
+	 */
+	public long getArtifactsCount(int artifactTypeID) throws TskCoreException {
+		return getArtifactsCountHelper(artifactTypeID, null);
+	}
+
 	/**
 	 * Get all blackboard artifacts of a given type. Does not included rejected
 	 * artifacts.
@@ -893,8 +1695,8 @@ public long getArtifactsCount(int artifactTypeID, long dataSourceObjId) throws T
 	 *                          within TSK core
 	 */
 	public List<BlackboardArtifact> getArtifacts(int artifactTypeID, long dataSourceObjId) throws TskCoreException {
-		return caseDb.getArtifactsHelper("blackboard_artifacts.data_source_obj_id = " + dataSourceObjId
-				+ " AND blackboard_artifact_types.artifact_type_id = " + artifactTypeID + ";");
+		String whereClause = String.format("artifacts.data_source_obj_id = %d", dataSourceObjId);
+		return getArtifactsWhere(getArtifactType(artifactTypeID), whereClause);
 	}
 
 	/**
@@ -916,12 +1718,21 @@ public List<BlackboardArtifact> getArtifacts(Collection<BlackboardArtifact.Type>
 			return new ArrayList<>();
 		}
 
-		String typeQuery = "";
+		String analysisResultQuery = "";
+		String dataArtifactQuery = "";
+
 		for (BlackboardArtifact.Type type : artifactTypes) {
-			if (!typeQuery.isEmpty()) {
-				typeQuery += " OR ";
+			if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+				if (!analysisResultQuery.isEmpty()) {
+					analysisResultQuery += " OR ";
+				}
+				analysisResultQuery += "types.artifact_type_id = " + type.getTypeID();
+			} else {
+				if (!dataArtifactQuery.isEmpty()) {
+					dataArtifactQuery += " OR ";
+				}
+				dataArtifactQuery += "types.artifact_type_id = " + type.getTypeID();
 			}
-			typeQuery += "blackboard_artifact_types.artifact_type_id = " + type.getTypeID();
 		}
 
 		String dsQuery = "";
@@ -929,20 +1740,231 @@ public List<BlackboardArtifact> getArtifacts(Collection<BlackboardArtifact.Type>
 			if (!dsQuery.isEmpty()) {
 				dsQuery += " OR ";
 			}
-			dsQuery += "blackboard_artifacts.data_source_obj_id = " + dsId;
+			dsQuery += "artifacts.data_source_obj_id = " + dsId;
 		}
 
-		String fullQuery = "( " + typeQuery + " ) AND ( " + dsQuery + " );";
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
 
-		return caseDb.getArtifactsHelper(fullQuery);
+		if (!analysisResultQuery.isEmpty()) {
+			String fullQuery = "( " + analysisResultQuery + " ) AND (" + dsQuery + ") ";
+			artifacts.addAll(this.getAnalysisResultsWhere(fullQuery));
+		}
+
+		if (!dataArtifactQuery.isEmpty()) {
+			String fullQuery = "( " + dataArtifactQuery + " ) AND (" + dsQuery + ") ";
+			artifacts.addAll(this.getDataArtifactsWhere(fullQuery));
+		}
+
+		return artifacts;
 	}
 
+	/**
+	 * Get all blackboard artifacts of the given type that contain attribute of
+	 * given type and value, for a given data source(s).
+	 *
+	 * @param artifactType		  artifact type to get
+	 * @param attributeType		 attribute type to be included
+	 * @param value				       attribute value to be included. Can be empty.
+	 * @param dataSourceObjId	data source to look under. If Null, then search
+	 *                        all data sources.
+	 * @param showRejected		  a flag whether to display rejected artifacts
+	 *
+	 * @return list of blackboard artifacts
+	 *
+	 * @throws TskCoreException exception thrown if a critical error occurs
+	 *                          within TSK core
+	 */
+	public List<BlackboardArtifact> getArtifacts(BlackboardArtifact.Type artifactType,
+			BlackboardAttribute.Type attributeType, String value, Long dataSourceObjId,
+			boolean showRejected) throws TskCoreException {
+
+		String query = " AND artifacts.artifact_type_id = " + artifactType.getTypeID() //NON-NLS 
+				+ " AND attributes.attribute_type_id = " + attributeType.getTypeID() //NON-NLS
+				+ ((value == null || value.isEmpty()) ? "" : " AND attributes.value_text = '" + value + "'") //NON-NLS
+				+ (showRejected ? "" : " AND artifacts.review_status_id != " + BlackboardArtifact.ReviewStatus.REJECTED.getID()) //NON-NLS
+				+ (dataSourceObjId != null ? " AND artifacts.data_source_obj_id = " + dataSourceObjId : ""); //NON-NLS
+
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
+		caseDb.acquireSingleUserCaseReadLock();
+
+		String finalQuery = (artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT
+				? ANALYSIS_RESULT_QUERY_STRING_WITH_ATTRIBUTES + query
+				: DATA_ARTIFACT_QUERY_STRING_WITH_ATTRIBUTES + query);
+
+		try (CaseDbConnection connection = caseDb.getConnection()) {
+			try (Statement statement = connection.createStatement();
+					ResultSet resultSet = connection.executeQuery(statement, finalQuery);) {
+
+				if (artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					artifacts.addAll(resultSetToAnalysisResults(resultSet));
+				} else {
+					artifacts.addAll(resultSetToDataArtifacts(resultSet));
+				}
+			} catch (SQLException ex) {
+				throw new TskCoreException(String.format("Error getting results with queryString = '%s'", finalQuery), ex);
+			}
+		} finally {
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+		return artifacts;
+	}
+
+	/**
+	 * Returns a list of "Exact match / Literal" keyword hits blackboard
+	 * artifacts according to the input conditions.
+	 *
+	 * @param keyword      The keyword string to search for. This should always
+	 *                     be populated unless you are trying to get all keyword
+	 *                     hits of specific keyword search type or keyword list
+	 *                     name.
+	 * @param searchType   Type of keyword search query.
+	 * @param kwsListName  (Optional) Name of the keyword list for which the
+	 *                     search results are for. If not specified, then the
+	 *                     results will be for ad-hoc keyword searches.
+	 * @param dataSourceId (Optional) Data source id of the target data source.
+	 *                     If null, then the results will be for all data
+	 *                     sources.
+	 *
+	 * @return A list of keyword hits blackboard artifacts
+	 *
+	 * @throws TskCoreException If an exception is encountered while running
+	 *                          database query to obtain the keyword hits.
+	 */
+	public List<BlackboardArtifact> getExactMatchKeywordSearchResults(String keyword, TskData.KeywordSearchQueryType searchType, String kwsListName, Long dataSourceId) throws TskCoreException {
+		return getKeywordSearchResults(keyword, "", searchType, kwsListName, dataSourceId);
+	}
+
+	/**
+	 * Returns a list of keyword hits blackboard artifacts according to the
+	 * input conditions.
+	 *
+	 * @param keyword      The keyword string to search for. This should always
+	 *                     be populated unless you are trying to get all keyword
+	 *                     hits of specific keyword search type or keyword list
+	 *                     name.
+	 * @param regex        For substring and regex keyword search types, the
+	 *                     regex/substring query string should be specified as
+	 *                     well as the keyword. It should be empty for literal
+	 *                     exact match keyword search types.
+	 * @param searchType   Type of keyword search query.
+	 * @param kwsListName  (Optional) Name of the keyword list for which the
+	 *                     search results are for. If not specified, then the
+	 *                     results will be for ad-hoc keyword searches.
+	 * @param dataSourceId (Optional) Data source id of the target data source.
+	 *                     If null, then the results will be for all data
+	 *                     sources.
+	 *
+	 * @return A list of keyword hits blackboard artifacts
+	 *
+	 * @throws TskCoreException If an exception is encountered while running
+	 *                          database query to obtain the keyword hits.
+	 */
+	public List<BlackboardArtifact> getKeywordSearchResults(String keyword, String regex, TskData.KeywordSearchQueryType searchType, String kwsListName, Long dataSourceId) throws TskCoreException {
+		
+		String dataSourceClause = dataSourceId == null
+				? ""
+				: " AND artifacts.data_source_obj_id = ? "; // dataSourceId
+
+		String kwsListClause = (kwsListName == null || kwsListName.isEmpty()
+				? " WHERE r.set_name IS NULL "
+				: " WHERE r.set_name = ? ");
+
+		String keywordClause = (keyword == null || keyword.isEmpty()
+				? ""
+				: " AND r.keyword = ? ");
+
+		String searchTypeClause = (searchType == null
+				? ""
+				: " AND r.search_type = ? ");
+
+		String regexClause = (regex == null || regex.isEmpty()
+				? ""
+				: " AND r.regexp_str = ? ");
+
+		String query = "SELECT r.* FROM ( "
+				+ " SELECT DISTINCT artifacts.artifact_id AS artifact_id, "
+				+ " artifacts.obj_id AS obj_id, "
+				+ " artifacts.artifact_obj_id AS artifact_obj_id, "
+				+ " artifacts.data_source_obj_id AS data_source_obj_id, "
+				+ " artifacts.artifact_type_id AS artifact_type_id, "
+				+ " types.type_name AS type_name, "
+				+ " types.display_name AS display_name, "
+				+ " types.category_type as category_type,"
+				+ " artifacts.review_status_id AS review_status_id, "
+				+ " results.conclusion AS conclusion, "
+				+ " results.significance AS significance, "
+				+ " results.priority AS priority, "
+				+ " results.configuration AS configuration, "
+				+ " results.justification AS justification, "
+				+ " (SELECT value_text FROM blackboard_attributes attr WHERE attr.artifact_id = artifacts.artifact_id AND attr.attribute_type_id = "
+				+ BlackboardAttribute.Type.TSK_SET_NAME.getTypeID() + " LIMIT 1) AS set_name, "
+				+ " (SELECT value_int32 FROM blackboard_attributes attr WHERE attr.artifact_id = artifacts.artifact_id AND attr.attribute_type_id = "
+				+ BlackboardAttribute.ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE.getTypeID() + " LIMIT 1) AS search_type, "
+				+ " (SELECT value_text FROM blackboard_attributes attr WHERE attr.artifact_id = artifacts.artifact_id AND attr.attribute_type_id = "
+				+ BlackboardAttribute.Type.TSK_KEYWORD_REGEXP.getTypeID() + " LIMIT 1) AS regexp_str, "
+				+ " (SELECT value_text FROM blackboard_attributes attr WHERE attr.artifact_id = artifacts.artifact_id AND attr.attribute_type_id = "
+				+ BlackboardAttribute.Type.TSK_KEYWORD.getTypeID() + " LIMIT 1) AS keyword "
+				+ " FROM blackboard_artifacts artifacts "
+				+ " JOIN blackboard_artifact_types AS types "
+				+ " ON artifacts.artifact_type_id = types.artifact_type_id "
+				+ " LEFT JOIN tsk_analysis_results AS results "
+				+ " ON artifacts.artifact_obj_id = results.artifact_obj_id "
+				+ " WHERE types.category_type = " + BlackboardArtifact.Category.ANALYSIS_RESULT.getID()
+				+ " AND artifacts.artifact_type_id = " + BlackboardArtifact.Type.TSK_KEYWORD_HIT.getTypeID() + " "
+				+ dataSourceClause + " ) r "
+				+ kwsListClause
+				+ keywordClause
+				+ searchTypeClause
+				+ regexClause;
+
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
+		caseDb.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = caseDb.getConnection()) {
+
+			try {
+				PreparedStatement preparedStatement = connection.getPreparedStatement(query, Statement.RETURN_GENERATED_KEYS);
+				preparedStatement.clearParameters();
+				int paramIdx = 0;
+				if (dataSourceId != null) {
+					preparedStatement.setLong(++paramIdx, dataSourceId);
+				}
+								
+				if (!(kwsListName == null || kwsListName.isEmpty())) {
+					preparedStatement.setString(++paramIdx, kwsListName);
+				}
+
+				if (!(keyword == null || keyword.isEmpty())) {
+					preparedStatement.setString(++paramIdx, keyword);
+				}
+
+				if (searchType != null) {
+					preparedStatement.setInt(++paramIdx, searchType.getType());
+				}
+
+				if (!(regex == null || regex.isEmpty())) {
+					preparedStatement.setString(++paramIdx, regex);
+				}
+				
+				try (ResultSet resultSet = connection.executeQuery(preparedStatement)) {
+					artifacts.addAll(resultSetToAnalysisResults(resultSet));
+				}
+
+			} catch (SQLException ex) {
+				throw new TskCoreException(String.format("Error getting keyword search results with queryString = '%s'", query), ex);
+			}
+		} finally {
+			caseDb.releaseSingleUserCaseReadLock();
+		}
+		return artifacts;
+	}
+	
 	/**
 	 * Gets count of blackboard artifacts of given type that match a given WHERE
 	 * clause. Uses a SELECT COUNT(*) FROM blackboard_artifacts statement
 	 *
 	 * @param artifactTypeID artifact type to count
-	 * @param whereClause    The WHERE clause to append to the SELECT statement.
+	 * @param whereClause    The WHERE clause to append to the SELECT statement
+	 *                       (may be null).
 	 *
 	 * @return A count of matching BlackboardArtifact .
 	 *
@@ -952,14 +1974,16 @@ public List<BlackboardArtifact> getArtifacts(Collection<BlackboardArtifact.Type>
 	private long getArtifactsCountHelper(int artifactTypeID, String whereClause) throws TskCoreException {
 		String queryString = "SELECT COUNT(*) AS count FROM blackboard_artifacts "
 				+ "WHERE blackboard_artifacts.artifact_type_id = " + artifactTypeID
-				+ " AND blackboard_artifacts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID()
-				+ " AND " + whereClause;
+				+ " AND blackboard_artifacts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID();
+
+		if (whereClause != null) {
+			queryString += " AND " + whereClause;
+		}
 
 		caseDb.acquireSingleUserCaseReadLock();
 		try (SleuthkitCase.CaseDbConnection connection = caseDb.getConnection();
 				Statement statement = connection.createStatement();
 				ResultSet resultSet = connection.executeQuery(statement, queryString);) {
-			//NON-NLS	
 			long count = 0;
 			if (resultSet.next()) {
 				count = resultSet.getLong("count");
@@ -972,53 +1996,49 @@ private long getArtifactsCountHelper(int artifactTypeID, String whereClause) thr
 		}
 	}
 
-	/*
-	 * Determine if an artifact of a given type exists for given content with a
-	 * specific list of attributes.
+	/**
+	 * Determines whether or not an artifact of a given type with a given set of
+	 * attributes already exists for a given content.
 	 *
-	 * @param content The content whose artifacts need to be looked at. @param
-	 * artifactType The type of artifact to look for. @param attributesList The
-	 * list of attributes to look for.
+	 * @param content      The content.
+	 * @param artifactType The artifact type.
+	 * @param attributes   The attributes.
 	 *
-	 * @return True if the specific artifact exists; otherwise false.
+	 * @return True if an artifact of the given type with a matching set of
 	 *
-	 * @throws TskCoreException If there is a problem getting artifacts or
-	 * attributes.
+	 * @throws TskCoreException The exception is thrown if there is an issue
+	 *                          querying the case database.
 	 */
-	public boolean artifactExists(Content content, BlackboardArtifact.ARTIFACT_TYPE artifactType,
-			Collection<BlackboardAttribute> attributesList) throws TskCoreException {
-
-		ArrayList<BlackboardArtifact> artifactsList;
-
-		/*
-		 * Get the content's artifacts.
-		 */
-		artifactsList = content.getArtifacts(artifactType);
-		if (artifactsList.isEmpty()) {
-			return false;
-		}
-
-		/*
-		 * Get each artifact's attributes and analyze them for matches.
-		 */
-		for (BlackboardArtifact artifact : artifactsList) {
-			if (attributesMatch(artifact.getAttributes(), attributesList)) {
-				/*
-				 * The exact artifact exists, so we don't need to look any
-				 * further.
-				 */
+	public boolean artifactExists(Content content, BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributes) throws TskCoreException {
+		List<BlackboardArtifact> existingArtifacts = content.getArtifacts(artifactType.getTypeID());
+		for (BlackboardArtifact artifact : existingArtifacts) {
+			if (attributesMatch(artifact.getAttributes(), attributes)) {
 				return true;
 			}
 		}
-
-		/*
-		 * None of the artifacts have the exact set of attribute type/value
-		 * combinations. The provided content does not have the artifact being
-		 * sought.
-		 */
 		return false;
 	}
 
+	/**
+	 * Determines whether or not an artifact of a given type with a given set of
+	 * attributes already exists for a given content.
+	 *
+	 * @param content      The content.
+	 * @param artifactType The artifact type.
+	 * @param attributes   The attributes.
+	 *
+	 * @return True if an artifact of the given type with a matching set of
+	 *
+	 * @throws TskCoreException The exception is thrown if there is an issue
+	 *                          querying the case database.
+	 * @deprecated Use artifactExists(Content content, BlackboardArtifact.Type
+	 * artifactType, Collection\<BlackboardAttribute\> attributes) instead.
+	 */
+	@Deprecated
+	public boolean artifactExists(Content content, BlackboardArtifact.ARTIFACT_TYPE artifactType, Collection<BlackboardAttribute> attributes) throws TskCoreException {
+		return artifactExists(content, getArtifactType(artifactType.getTypeID()), attributes);
+	}
+
 	/**
 	 * Determine if the expected attributes can all be found in the supplied
 	 * file attributes list.
@@ -1098,7 +2118,6 @@ private boolean attributesMatch(Collection<BlackboardAttribute> fileAttributesLi
 
 	}
 
-
 	/**
 	 * A Blackboard exception.
 	 */
@@ -1217,6 +2236,9 @@ public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, long s
 					statement.setLong(1, artifact_obj_id);
 					statement.setLong(2, osAccountObjId);
 					connection.executeUpdate(statement);
+					
+					// Add an OS account instance 
+					caseDb.getOsAccountManager().newOsAccountInstance(osAccountObjId, dataSourceObjId, OsAccountInstance.OsAccountInstanceType.ACCESSED, connection);
 				}
 
 				// if attributes are provided, add them to the artifact.
@@ -1232,24 +2254,101 @@ public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, long s
 	}
 
 	/**
-	 * Event published by SleuthkitCase when one or more artifacts are posted. A
-	 * posted artifact is complete (all attributes have been added) and ready
-	 * for further processing.
+	 * Returns a list of BlackboardArtifacts of the given artifact type and
+	 * source object id.
+	 *
+	 * @param artifactType The artifact type.
+	 * @param sourceObjId  The artifact parent source id (obj_id)
+	 *
+	 * @return A list of BlackboardArtifacts for the given parameters.
+	 *
+	 * @throws TskCoreException
+	 */
+	List<BlackboardArtifact> getArtifactsBySourceId(BlackboardArtifact.Type artifactType, long sourceObjId) throws TskCoreException {
+		String whereClause = String.format("artifacts.obj_id = %d", sourceObjId);
+		return getArtifactsWhere(artifactType, whereClause);
+	}
+
+	/**
+	 * Returns a list of artifacts of the given type.
+	 *
+	 * @param artifactType The type of artifacts to retrieve.
+	 *
+	 * @return A list of artifacts of the given type.
+	 *
+	 * @throws TskCoreException
+	 */
+	List<BlackboardArtifact> getArtifactsByType(BlackboardArtifact.Type artifactType) throws TskCoreException {
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
+		if (artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+			artifacts.addAll(getAnalysisResultsByType(artifactType.getTypeID()));
+		} else {
+			artifacts.addAll(getDataArtifacts(artifactType.getTypeID()));
+		}
+		return artifacts;
+	}
+
+	/**
+	 * Returns a list of artifacts for the given artifact type with the given
+	 * where clause.
+	 *
+	 * The Where clause will be added to the basic query for retrieving
+	 * DataArtifacts or AnalysisResults from the DB. The where clause should not
+	 * include the artifact type. This method will add the artifact type to the
+	 * where clause.
+	 *
+	 * @param artifactType The artifact type.
+	 * @param whereClause  Additional where clause.
+	 *
+	 * @return A list of BlackboardArtifacts of the given type with the given
+	 *         conditional.
+	 *
+	 * @throws TskCoreException
+	 */
+	private List<BlackboardArtifact> getArtifactsWhere(BlackboardArtifact.Type artifactType, String whereClause) throws TskCoreException {
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
+		String whereWithType = whereClause + " AND artifacts.artifact_type_id = " + artifactType.getTypeID();
+
+		if (artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+			artifacts.addAll(getAnalysisResultsWhere(whereWithType));
+		} else {
+			artifacts.addAll(getDataArtifactsWhere(whereWithType));
+		}
+
+		return artifacts;
+	}
+
+	/**
+	 * An event published by SleuthkitCase when one or more artifacts are
+	 * posted. Posted artifacts should be complete (all attributes have been
+	 * added) and ready for further analysis.
 	 */
 	final public class ArtifactsPostedEvent {
 
 		private final String moduleName;
 		private final ImmutableSet<BlackboardArtifact.Type> artifactTypes;
 		private final ImmutableSet<BlackboardArtifact> artifacts;
+		private final Long ingestJobId;
 
-		private ArtifactsPostedEvent(Collection<BlackboardArtifact> artifacts, String moduleName) throws BlackboardException {
+		/**
+		 * Constructs an event published by SleuthkitCase when one or more
+		 * artifacts are posted. Posted artifacts should be complete (all
+		 * attributes have been added) and ready for further analysis.
+		 *
+		 * @param artifacts   The artifacts. 
+		 * @param moduleName  The display name of the module posting the
+		 *                    artifacts.
+		 * @param ingestJobId The numeric identifier of the ingest job within
+		 *                    which the artifacts were posted, may be null.
+		 */
+		private ArtifactsPostedEvent(Collection<BlackboardArtifact> artifacts, String moduleName, Long ingestJobId) throws BlackboardException {
 			Set<Integer> typeIDS = artifacts.stream()
 					.map(BlackboardArtifact::getArtifactTypeID)
 					.collect(Collectors.toSet());
 			Set<BlackboardArtifact.Type> types = new HashSet<>();
 			for (Integer typeID : typeIDS) {
 				try {
-					types.add(caseDb.getArtifactType(typeID));
+					types.add(getArtifactType(typeID));
 				} catch (TskCoreException tskCoreException) {
 					throw new BlackboardException("Error getting artifact type by id.", tskCoreException);
 				}
@@ -1257,13 +2356,25 @@ private ArtifactsPostedEvent(Collection<BlackboardArtifact> artifacts, String mo
 			artifactTypes = ImmutableSet.copyOf(types);
 			this.artifacts = ImmutableSet.copyOf(artifacts);
 			this.moduleName = moduleName;
-
+			this.ingestJobId = ingestJobId;
 		}
 
+		/**
+		 * Gets the posted artifacts.
+		 *
+		 * @return The artifacts (data artifacts and/or analysis results).
+		 */
 		public Collection<BlackboardArtifact> getArtifacts() {
 			return ImmutableSet.copyOf(artifacts);
 		}
 
+		/**
+		 * Gets the posted artifacts of a given type.
+		 *
+		 * @param artifactType The artifact type.
+		 *
+		 * @return The artifacts, if any.
+		 */
 		public Collection<BlackboardArtifact> getArtifacts(BlackboardArtifact.Type artifactType) {
 			Set<BlackboardArtifact> tempSet = artifacts.stream()
 					.filter(artifact -> artifact.getArtifactTypeID() == artifactType.getTypeID())
@@ -1271,12 +2382,33 @@ public Collection<BlackboardArtifact> getArtifacts(BlackboardArtifact.Type artif
 			return ImmutableSet.copyOf(tempSet);
 		}
 
+		/**
+		 * Gets the display name of the module that posted the artifacts.
+		 *
+		 * @return The display name.
+		 */
 		public String getModuleName() {
 			return moduleName;
 		}
 
+		/**
+		 * Gets the types of artifacts that were posted.
+		 *
+		 * @return The types.
+		 */
 		public Collection<BlackboardArtifact.Type> getArtifactTypes() {
 			return ImmutableSet.copyOf(artifactTypes);
 		}
+
+		/**
+		 * Gets the numeric identifier of the ingest job for which the artifacts
+		 * were posted.
+		 *
+		 * @return The ingest job ID, may be null.
+		 */
+		public Optional<Long> getIngestJobId() {
+			return Optional.ofNullable(ingestJobId);
+		}
+
 	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/BlackboardArtifact.java b/bindings/java/src/org/sleuthkit/datamodel/BlackboardArtifact.java
index a1d8c218a8656ad86d496b3887db607fc857860f..d7c49c54ff291bfe75e0e57885bfeec6347a59d5 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/BlackboardArtifact.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/BlackboardArtifact.java
@@ -18,12 +18,12 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.annotations.Beta;
 import java.io.Serializable;
 import java.io.UnsupportedEncodingException;
 import java.sql.SQLException;
 import java.text.MessageFormat;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -39,7 +39,6 @@
 import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
 import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
-import org.sleuthkit.datamodel.SleuthkitCase.ObjectInfo;
 
 /**
  * An artifact that has been posted to the blackboard. Artifacts store analysis
@@ -52,7 +51,7 @@
  * IMPORTANT NOTE: No more than one attribute of a given type should be added to
  * an artifact. It is undefined about which will be used.
  */
-public class BlackboardArtifact implements Content {
+public abstract class BlackboardArtifact implements Content {
 
 	private static final ResourceBundle bundle = ResourceBundle.getBundle("org.sleuthkit.datamodel.Bundle");
 	private final long artifactId;
@@ -179,7 +178,8 @@ public long getObjectID() {
 	 *
 	 * @return The data source object id, may be null.
 	 */
-	Long getDataSourceObjectID() {
+	@Beta
+	public Long getDataSourceObjectID() {
 		return this.dataSourceObjId;
 	}
 
@@ -191,18 +191,20 @@ Long getDataSourceObjectID() {
 	public int getArtifactTypeID() {
 		return this.artifactTypeId;
 	}
-	
+
 	/**
 	 * Gets the artifact type for this artifact.
-	 * 
+	 *
 	 * @return The artifact type.
+	 * 
+	 * @throws TskCoreException
 	 */
 	public BlackboardArtifact.Type getType() throws TskCoreException {
 		BlackboardArtifact.Type standardTypesValue = BlackboardArtifact.Type.STANDARD_TYPES.get(getArtifactTypeID());
 		if (standardTypesValue != null) {
 			return standardTypesValue;
 		} else {
-			return getSleuthkitCase().getArtifactType(getArtifactTypeID());
+			return getSleuthkitCase().getBlackboard().getArtifactType(getArtifactTypeID());
 		}
 	}
 
@@ -234,44 +236,112 @@ public String getDisplayName() {
 	public String getShortDescription() throws TskCoreException {
 		BlackboardAttribute attr = null;
 		StringBuilder shortDescription = new StringBuilder("");
-		switch (ARTIFACT_TYPE.fromID(artifactTypeId)) {
-			case TSK_WEB_BOOKMARK:  //web_bookmark, web_cookie, web_download, and web_history are the same attribute for now
-			case TSK_WEB_COOKIE:
-			case TSK_WEB_DOWNLOAD:
-			case TSK_WEB_HISTORY:
-				attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DOMAIN));
-				break;
-			case TSK_KEYWORD_HIT:
-				attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW));
-				break;
-			case TSK_DEVICE_ATTACHED:
-				attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DEVICE_ID));
-				break;
-			case TSK_CONTACT: //contact, message, and calllog are the same attributes for now
-			case TSK_MESSAGE:
-			case TSK_CALLLOG:
-				//get the first of these attributes which exists and is non null
-				final ATTRIBUTE_TYPE[] typesThatCanHaveName = {ATTRIBUTE_TYPE.TSK_NAME,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_FROM,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_TO,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_HOME,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_MOBILE,
-					ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_OFFICE,
-					ATTRIBUTE_TYPE.TSK_EMAIL,
-					ATTRIBUTE_TYPE.TSK_EMAIL_FROM,
-					ATTRIBUTE_TYPE.TSK_EMAIL_TO,
-					ATTRIBUTE_TYPE.TSK_EMAIL_HOME,
-					ATTRIBUTE_TYPE.TSK_EMAIL_OFFICE}; //in the order we want to use them
-				for (ATTRIBUTE_TYPE t : typesThatCanHaveName) {
-					attr = getAttribute(new BlackboardAttribute.Type(t));
-					if (attr != null && !attr.getDisplayString().isEmpty()) {
-						break;
+		if (BlackboardArtifact.Type.STANDARD_TYPES.get(artifactTypeId) != null) {
+			switch (ARTIFACT_TYPE.fromID(artifactTypeId)) {
+				case TSK_WIFI_NETWORK_ADAPTER:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_MAC_ADDRESS));
+					break;
+				case TSK_WIFI_NETWORK:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_SSID));
+					break;
+				case TSK_REMOTE_DRIVE:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_REMOTE_PATH));
+					break;
+				case TSK_SERVICE_ACCOUNT:
+				case TSK_SCREEN_SHOTS:
+				case TSK_DELETED_PROG:
+				case TSK_METADATA:
+				case TSK_OS_INFO:
+				case TSK_PROG_NOTIFICATIONS:
+				case TSK_PROG_RUN:
+				case TSK_RECENT_OBJECT:
+				case TSK_USER_DEVICE_EVENT:
+				case TSK_WEB_SEARCH_QUERY:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_PROG_NAME));
+					break;
+				case TSK_BLUETOOTH_PAIRING:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DEVICE_NAME));
+					break;
+				case TSK_ACCOUNT:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_ID));
+					if (attr == null) {
+						attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_CARD_NUMBER));
 					}
-				}
-				break;
-			default:
-				break;
+					break;
+				case TSK_WEB_CATEGORIZATION:
+				case TSK_BLUETOOTH_ADAPTER:
+				case TSK_GPS_AREA:
+				case TSK_GPS_BOOKMARK:
+				case TSK_GPS_LAST_KNOWN_LOCATION:
+				case TSK_GPS_ROUTE:
+				case TSK_GPS_SEARCH:
+				case TSK_GPS_TRACK:
+				case TSK_WEB_FORM_AUTOFILL:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_NAME));
+					break;
+				case TSK_WEB_ACCOUNT_TYPE:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_TEXT));
+					break;
+				case TSK_HASHSET_HIT:
+				case TSK_INTERESTING_ARTIFACT_HIT:
+				case TSK_INTERESTING_FILE_HIT:
+				case TSK_INTERESTING_ITEM:
+				case TSK_YARA_HIT:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_SET_NAME));
+					break;
+				case TSK_ENCRYPTION_DETECTED:
+				case TSK_ENCRYPTION_SUSPECTED:
+				case TSK_OBJECT_DETECTED:
+				case TSK_USER_CONTENT_SUSPECTED:
+				case TSK_VERIFICATION_FAILED:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_COMMENT));
+					break;
+				case TSK_DATA_SOURCE_USAGE:
+				case TSK_CALENDAR_ENTRY:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DESCRIPTION));
+					break;
+				case TSK_WEB_BOOKMARK:  //web_bookmark, web_cookie, web_download, and web_history are the same attribute for now
+				case TSK_WEB_COOKIE:
+				case TSK_WEB_DOWNLOAD:
+				case TSK_WEB_HISTORY:
+				case TSK_WEB_CACHE:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DOMAIN));
+					break;
+				case TSK_KEYWORD_HIT:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_KEYWORD_PREVIEW));
+					break;
+				case TSK_DEVICE_ATTACHED:
+					attr = getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_DEVICE_ID));
+					break;
+				case TSK_CONTACT: //contact, message, and calllog are the same attributes for now
+				case TSK_MESSAGE:
+				case TSK_CALLLOG:
+				case TSK_SPEED_DIAL_ENTRY:
+				case TSK_WEB_FORM_ADDRESS:
+					//get the first of these attributes which exists and is non null
+					final ATTRIBUTE_TYPE[] typesThatCanHaveName = {ATTRIBUTE_TYPE.TSK_NAME,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_FROM,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_TO,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_HOME,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_MOBILE,
+						ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_OFFICE,
+						ATTRIBUTE_TYPE.TSK_EMAIL,
+						ATTRIBUTE_TYPE.TSK_EMAIL_FROM,
+						ATTRIBUTE_TYPE.TSK_EMAIL_TO,
+						ATTRIBUTE_TYPE.TSK_EMAIL_HOME,
+						ATTRIBUTE_TYPE.TSK_EMAIL_OFFICE,
+						ATTRIBUTE_TYPE.TSK_LOCATION}; //in the order we want to use them
+					for (ATTRIBUTE_TYPE t : typesThatCanHaveName) {
+						attr = getAttribute(new BlackboardAttribute.Type(t));
+						if (attr != null && !attr.getDisplayString().isEmpty()) {
+							break;
+						}
+					}
+					break;
+				default:
+					break;
+			}
 		}
 		if (attr != null) {
 			shortDescription.append(attr.getAttributeType().getDisplayName()).append(": ").append(attr.getDisplayString());
@@ -351,15 +421,27 @@ public void addAttribute(BlackboardAttribute attribute) throws TskCoreException
 	public List<BlackboardAttribute> getAttributes() throws TskCoreException {
 		ArrayList<BlackboardAttribute> attributes;
 		if (false == loadedCacheFromDb) {
-			attributes = getSleuthkitCase().getBlackboardAttributes(this);
+			attributes = getSleuthkitCase().getBlackboard().getBlackboardAttributes(this);
 			attrsCache.clear();
 			attrsCache.addAll(attributes);
 			loadedCacheFromDb = true;
 		} else {
-			attributes = new ArrayList<BlackboardAttribute>(attrsCache);
+			attributes = new ArrayList<>(attrsCache);
 		}
 		return attributes;
 	}
+	
+	/**
+	 * Set all attributes at once.
+	 * Will overwrite any already loaded attributes.
+	 * 
+	 * @param attributes The set of attributes for this artifact.
+	 */
+	void setAttributes(List<BlackboardAttribute> attributes) {
+		attrsCache.clear();
+		attrsCache.addAll(attributes);
+		loadedCacheFromDb = true;
+	}
 
 	/**
 	 * Gets the attribute of this artifact that matches a given type.
@@ -467,16 +549,8 @@ public String getUniquePath() throws TskCoreException {
 
 	@Override
 	public Content getParent() throws TskCoreException {
-		// It is possible that multiple threads could be doing this calculation
-		// simultaneously, but it's worth the potential extra processing to prevent deadlocks.
 		if (parent == null) {
-			ObjectInfo parentInfo;
-			parentInfo = getSleuthkitCase().getParentInfo(this);
-			if (parentInfo == null) {
-				parent = null;
-			} else {
-				parent = getSleuthkitCase().getContentById(parentInfo.getId());
-			}
+			parent = getSleuthkitCase().getContentById(sourceObjId);
 		}
 		return parent;
 	}
@@ -503,7 +577,7 @@ public List<AnalysisResult> getAllAnalysisResults() throws TskCoreException {
 	public List<DataArtifact> getAllDataArtifacts() throws TskCoreException {
 		return sleuthkitCase.getBlackboard().getDataArtifactsBySource(artifactObjId);
 	}
-	
+
 	@Override
 	public Score getAggregateScore() throws TskCoreException {
 		return sleuthkitCase.getScoringManager().getAggregateScore(artifactObjId);
@@ -699,7 +773,8 @@ public Set<String> getHashSetNames() throws TskCoreException {
 	 *         looked up from this)
 	 *
 	 * @throws TskCoreException if critical error occurred within tsk core
-	 * @deprecated Use the Blackboard to create Data Artifacts and Analysis Results.
+	 * @deprecated Use the Blackboard to create Data Artifacts and Analysis
+	 * Results.
 	 */
 	@Deprecated
 	@Override
@@ -709,9 +784,12 @@ public BlackboardArtifact newArtifact(int artifactTypeID) throws TskCoreExceptio
 
 	@Override
 	public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactType, Score score, String conclusion, String configuration, String justification, Collection<BlackboardAttribute> attributesList) throws TskCoreException {
+		// Get the ID before starting the transaction
+		long dataSourceId = this.getDataSource().getId();
+
 		CaseDbTransaction trans = sleuthkitCase.beginTransaction();
 		try {
-			AnalysisResultAdded resultAdded = sleuthkitCase.getBlackboard().newAnalysisResult(artifactType, this.getObjectID(), this.getDataSource().getId(), score, conclusion, configuration, justification, attributesList, trans);
+			AnalysisResultAdded resultAdded = sleuthkitCase.getBlackboard().newAnalysisResult(artifactType, this.getId(), dataSourceId, score, conclusion, configuration, justification, attributesList, trans);
 
 			trans.commit();
 			return resultAdded;
@@ -725,7 +803,7 @@ public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactTyp
 	public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactType, Score score, String conclusion, String configuration, String justification, Collection<BlackboardAttribute> attributesList, long dataSourceId) throws TskCoreException {
 		CaseDbTransaction trans = sleuthkitCase.beginTransaction();
 		try {
-			AnalysisResultAdded resultAdded = sleuthkitCase.getBlackboard().newAnalysisResult(artifactType, this.getObjectID(), dataSourceId, score, conclusion, configuration, justification, attributesList, trans);
+			AnalysisResultAdded resultAdded = sleuthkitCase.getBlackboard().newAnalysisResult(artifactType, this.getId(), dataSourceId, score, conclusion, configuration, justification, attributesList, trans);
 
 			trans.commit();
 			return resultAdded;
@@ -739,12 +817,12 @@ public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactTyp
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId) throws TskCoreException {
 		throw new TskCoreException("Cannot create data artifact of an artifact. Not supported.");
 	}
-	
+
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId, long dataSourceId) throws TskCoreException {
 		throw new TskCoreException("Cannot create data artifact of an artifact. Not supported.");
 	}
-	
+
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList) throws TskCoreException {
 		return newDataArtifact(artifactType, attributesList, null);
@@ -759,7 +837,8 @@ public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collec
 	 *         looked up from this)
 	 *
 	 * @throws TskCoreException if critical error occurred within tsk core
-	 * @deprecated Use the Blackboard to create Data Artifacts and Analysis Results.
+	 * @deprecated Use the Blackboard to create Data Artifacts and Analysis
+	 * Results.
 	 */
 	@Deprecated
 	@Override
@@ -1012,7 +1091,10 @@ public static final class Type implements Serializable {
 		/**
 		 * An meta-artifact to call attention to a file deemed to be
 		 * interesting.
+		 *
+		 * @deprecated Use TSK_INTERESTING_ITEM instead.
 		 */
+		@Deprecated
 		public static final Type TSK_INTERESTING_FILE_HIT = new BlackboardArtifact.Type(12, "TSK_INTERESTING_FILE_HIT", bundle.getString("BlackboardArtifact.tskInterestingFileHit.text"), Category.ANALYSIS_RESULT);
 
 		/**
@@ -1121,7 +1203,10 @@ public static final class Type implements Serializable {
 		/**
 		 * An meta-artifact to call attention to an artifact deemed to be
 		 * interesting.
+		 *
+		 * @deprecated Use TSK_INTERESTING_ITEM instead.
 		 */
+		@Deprecated
 		public static final Type TSK_INTERESTING_ARTIFACT_HIT = new BlackboardArtifact.Type(35, "TSK_INTERESTING_ARTIFACT_HIT", bundle.getString("BlackboardArtifact.tskInterestingArtifactHit.text"), Category.ANALYSIS_RESULT);
 
 		/**
@@ -1288,7 +1373,47 @@ public static final class Type implements Serializable {
 		 */
 		public static final Type TSK_WEB_CATEGORIZATION = new BlackboardArtifact.Type(68, "TSK_WEB_CATEGORIZATION", bundle.getString("BlackboardArtifact.tskWebCategorization.text"), Category.ANALYSIS_RESULT);
 
-		// NOTE: When adding a new standard BlackboardArtifact.Type, add the instance and then add to the STANDARD_TYPES map.
+		/**
+		 * Indicates that the file or artifact was previously seen in another
+		 * Autopsy case.
+		 */
+		public static final Type TSK_PREVIOUSLY_SEEN = new BlackboardArtifact.Type(69, "TSK_PREVIOUSLY_SEEN", bundle.getString("BlackboardArtifact.tskPreviouslySeen.text"), Category.ANALYSIS_RESULT);
+
+		/**
+		 * Indicates that the file or artifact was previously unseen in another
+		 * Autopsy case.
+		 */
+		public static final Type TSK_PREVIOUSLY_UNSEEN = new BlackboardArtifact.Type(70, "TSK_PREVIOUSLY_UNSEEN", bundle.getString("BlackboardArtifact.tskPreviouslyUnseen.text"), Category.ANALYSIS_RESULT);
+
+		/**
+		 * Indicates that the file or artifact was previously tagged as
+		 * "Notable" in another Autopsy case.
+		 */
+		public static final Type TSK_PREVIOUSLY_NOTABLE = new BlackboardArtifact.Type(71, "TSK_PREVIOUSLY_NOTABLE", bundle.getString("BlackboardArtifact.tskPreviouslyNotable.text"), Category.ANALYSIS_RESULT);
+
+		/**
+		 * A meta-artifact to call attention to an item deemed to be
+		 * interesting.
+		 */
+		public static final Type TSK_INTERESTING_ITEM = new BlackboardArtifact.Type(72, "TSK_INTERESTING_ITEM", bundle.getString("BlackboardArtifact.tskInterestingItem.text"), Category.ANALYSIS_RESULT);
+		
+		/**
+		 * Malware artifact.
+		 */
+		public static final Type TSK_MALWARE = new BlackboardArtifact.Type(73, "TSK_MALWARE", bundle.getString("BlackboardArtifact.tskMalware.text"), Category.ANALYSIS_RESULT);
+		/*
+		 * IMPORTANT!
+		 *
+		 * Until BlackboardArtifact.ARTIFACT_TYPE is deprecated and/or removed,
+		 * new standard artifact types need to be added to both
+		 * BlackboardArtifact.ARTIFACT_TYPE and
+		 * BlackboardArtifact.Type.STANDARD_TYPES.
+		 *
+		 * Also, ensure that new types have a one line JavaDoc description and
+		 * are added to the standard artifacts catalog (artifact_catalog.dox).
+		 *
+		 */
+
 		/**
 		 * All standard artifact types with ids mapped to the type.
 		 */
@@ -1303,7 +1428,6 @@ public static final class Type implements Serializable {
 				TSK_KEYWORD_HIT,
 				TSK_HASHSET_HIT,
 				TSK_DEVICE_ATTACHED,
-				TSK_INTERESTING_FILE_HIT,
 				TSK_EMAIL_MSG,
 				TSK_EXTRACTED_TEXT,
 				TSK_WEB_SEARCH_QUERY,
@@ -1322,7 +1446,6 @@ public static final class Type implements Serializable {
 				TSK_PROG_RUN,
 				TSK_ENCRYPTION_DETECTED,
 				TSK_EXT_MISMATCH_DETECTED,
-				TSK_INTERESTING_ARTIFACT_HIT,
 				TSK_GPS_ROUTE,
 				TSK_REMOTE_DRIVE,
 				TSK_FACE_DETECTED,
@@ -1353,14 +1476,19 @@ public static final class Type implements Serializable {
 				TSK_USER_DEVICE_EVENT,
 				TSK_YARA_HIT,
 				TSK_GPS_AREA,
-				TSK_WEB_CATEGORIZATION
+				TSK_WEB_CATEGORIZATION,
+				TSK_PREVIOUSLY_SEEN,
+				TSK_PREVIOUSLY_UNSEEN,
+				TSK_PREVIOUSLY_NOTABLE,
+				TSK_INTERESTING_ITEM,
+				TSK_MALWARE
 		).collect(Collectors.toMap(type -> type.getTypeID(), type -> type)));
 
 		private final String typeName;
 		private final int typeID;
 		private final String displayName;
 		private final Category category;
-		
+
 		/**
 		 * Constructs a custom artifact type.
 		 *
@@ -1543,7 +1671,10 @@ public enum ARTIFACT_TYPE implements SleuthkitVisitableItem {
 		/**
 		 * An meta-artifact to call attention to a file deemed to be
 		 * interesting.
+		 *
+		 * @deprecated Use TSK_INTERESTING_ITEM instead.
 		 */
+		@Deprecated
 		TSK_INTERESTING_FILE_HIT(12, "TSK_INTERESTING_FILE_HIT", //NON-NLS
 				bundle.getString("BlackboardArtifact.tskInterestingFileHit.text"), Category.ANALYSIS_RESULT), ///< an interesting/notable file hit
 		/**
@@ -1678,7 +1809,10 @@ public enum ARTIFACT_TYPE implements SleuthkitVisitableItem {
 		/**
 		 * An meta-artifact to call attention to an artifact deemed to be
 		 * interesting.
+		 *
+		 * @deprecated Use TSK_INTERESTING_ITEM instead.
 		 */
+		@Deprecated
 		TSK_INTERESTING_ARTIFACT_HIT(35, "TSK_INTERESTING_ARTIFACT_HIT", //NON-NLS
 				bundle.getString("BlackboardArtifact.tskInterestingArtifactHit.text"), Category.ANALYSIS_RESULT),
 		/**
@@ -1849,13 +1983,48 @@ public enum ARTIFACT_TYPE implements SleuthkitVisitableItem {
 		TSK_GPS_AREA(67, "TSK_GPS_AREA",
 				bundle.getString("BlackboardArtifact.tskGPSArea.text"), Category.DATA_ARTIFACT),
 		TSK_WEB_CATEGORIZATION(68, "TSK_WEB_CATEGORIZATION",
-				bundle.getString("BlackboardArtifact.tskWebCategorization.text"), Category.ANALYSIS_RESULT),;
-
+				bundle.getString("BlackboardArtifact.tskWebCategorization.text"), Category.ANALYSIS_RESULT),
+		/**
+		 * Indicates that the file or artifact was previously seen in another
+		 * Autopsy case.
+		 */
+		TSK_PREVIOUSLY_SEEN(69, "TSK_PREVIOUSLY_SEEN",
+				bundle.getString("BlackboardArtifact.tskPreviouslySeen.text"), Category.ANALYSIS_RESULT),
+		/**
+		 * Indicates that the file or artifact was previously unseen in another
+		 * Autopsy case.
+		 */
+		TSK_PREVIOUSLY_UNSEEN(70, "TSK_PREVIOUSLY_UNSEEN",
+				bundle.getString("BlackboardArtifact.tskPreviouslyUnseen.text"), Category.ANALYSIS_RESULT),
+		/**
+		 * Indicates that the file or artifact was previously tagged as
+		 * "Notable" in another Autopsy case.
+		 */
+		TSK_PREVIOUSLY_NOTABLE(71, "TSK_PREVIOUSLY_NOTABLE",
+				bundle.getString("BlackboardArtifact.tskPreviouslyNotable.text"), Category.ANALYSIS_RESULT),
+		/**
+		 * A meta-artifact to call attention to an item deemed to be
+		 * interesting.
+		 */
+		TSK_INTERESTING_ITEM(72, "TSK_INTERESTING_ITEM", //NON-NLS
+				bundle.getString("BlackboardArtifact.tskInterestingItem.text"), Category.ANALYSIS_RESULT),
+		/**
+		 * Malware artifact.
+		 */
+		TSK_MALWARE(73, "TSK_MALWARE", //NON-NLS
+				bundle.getString("BlackboardArtifact.tskMalware.text"), Category.ANALYSIS_RESULT);
 		/*
-		 * To developers: For each new artifact, ensure that: - The enum value
-		 * has 1-line JavaDoc description - The artifact catalog
-		 * (artifact_catalog.dox) is updated to reflect the attributes it uses
+		 * IMPORTANT!
+		 *
+		 * Until BlackboardArtifact.ARTIFACT_TYPE is deprecated and/or removed,
+		 * new standard artifact types need to be added to both
+		 * BlackboardArtifact.ARTIFACT_TYPE and
+		 * BlackboardArtifact.Type.STANDARD_TYPES.
+		 *
+		 * Also, ensure that new types have a one line JavaDoc description and
+		 * are added to the standard artifacts catalog (artifact_catalog.dox).
 		 */
+
 		private final String label;
 		private final int typeId;
 		private final String displayName;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/BlackboardAttribute.java b/bindings/java/src/org/sleuthkit/datamodel/BlackboardAttribute.java
index 5696355d4cd0f109aa1d5887208f19d78342418c..ae799531abce5e222b02b450475e15ee694caae9 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/BlackboardAttribute.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/BlackboardAttribute.java
@@ -455,7 +455,7 @@ public static final class Type implements Serializable {
 		public static final Type TSK_ENTROPY = new Type(29, "TSK_ENTROPY", bundle.getString("BlackboardAttribute.tskEntropy.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DOUBLE);
 
 		// TSK_HASHSET_NAME (id: 30) has been deprecated.  Please use TSK_SET_NAME instead.
-		// TSK_INTERESTING_FILE (id: 31) has been deprecated.  Please use TSK_INTERESTING_FILE_HIT instead.
+		// TSK_INTERESTING_FILE (id: 31) has been deprecated.  Please use TSK_INTERESTING_ITEM analysis result instead.
 		public static final Type TSK_REFERRER = new Type(32, "TSK_REFERRER", bundle.getString("BlackboardAttribute.tskReferrer.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
 		public static final Type TSK_DATETIME_ACCESSED = new Type(33, "TSK_DATETIME_ACCESSED", bundle.getString("BlackboardAttribute.tskDateTimeAccessed.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DATETIME);
 		public static final Type TSK_IP_ADDRESS = new Type(34, "TSK_IP_ADDRESS", bundle.getString("BlackboardAttribute.tskIpAddress.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
@@ -609,11 +609,14 @@ public static final class Type implements Serializable {
 		public static final Type TSK_HOST = new Type(154, "TSK_HOST", bundle.getString("BlackboardAttribute.tskHost.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
 		public static final Type TSK_HOME_DIR = new Type(155, "TSK_HOME_DIR", bundle.getString("BlackboardAttribute.tskHomeDir.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
 		public static final Type TSK_IS_ADMIN = new Type(156, "TSK_IS_ADMIN", bundle.getString("BlackboardAttribute.tskIsAdmin.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.INTEGER);
+		public static final Type TSK_CORRELATION_TYPE = new Type(157, "TSK_CORRELATION_TYPE", bundle.getString("BlackboardAttribute.tskCorrelationType.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
+		public static final Type TSK_CORRELATION_VALUE = new Type(158, "TSK_CORRELATION_VALUE", bundle.getString("BlackboardAttribute.tskCorrelationValue.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
+		public static final Type TSK_OTHER_CASES = new Type(159, "TSK_OTHER_CASES", bundle.getString("BlackboardAttribute.tskOtherCases.text"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING);
+
 		// NOTE: When adding a new standard BlackboardAttribute.Type, add the instance and then add to the STANDARD_TYPES list.
 		/**
 		 * A list of all the standard attribute types.
 		 */
-		
 		static final List<Type> STANDARD_TYPES = Collections.unmodifiableList(Arrays.asList(
 				TSK_URL,
 				TSK_DATETIME,
@@ -761,7 +764,10 @@ public static final class Type implements Serializable {
 				TSK_REALM,
 				TSK_HOST,
 				TSK_HOME_DIR,
-				TSK_IS_ADMIN
+				TSK_IS_ADMIN,
+				TSK_CORRELATION_TYPE,
+				TSK_CORRELATION_VALUE,
+				TSK_OTHER_CASES
 		));
 
 		private static final long serialVersionUID = 1L;
@@ -1106,7 +1112,7 @@ public enum ATTRIBUTE_TYPE {
 				bundle.getString("BlackboardAttribute.tskHashsetName.text"),
 				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING),
 		/**
-		 * @deprecated Use a TSK_INTERESTING_FILE_HIT artifact instead.
+		 * @deprecated Use a TSK_INTERESTING_ITEM artifact instead.
 		 */
 		@Deprecated
 		TSK_INTERESTING_FILE(31, "TSK_INTERESTING_FILE", //NON-NLS
@@ -1529,7 +1535,16 @@ public enum ATTRIBUTE_TYPE {
 				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING),
 		TSK_IS_ADMIN(156, "TSK_IS_ADMIN",
 				bundle.getString("BlackboardAttribute.tskIsAdmin.text"),
-				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.INTEGER),;
+				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.INTEGER),
+		TSK_CORRELATION_TYPE(157, "TSK_CORRELATION_TYPE",
+				bundle.getString("BlackboardAttribute.tskCorrelationType.text"),
+				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING),
+		TSK_CORRELATION_VALUE(158, "TSK_CORRELATION_VALUE",
+				bundle.getString("BlackboardAttribute.tskCorrelationValue.text"),
+				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING),
+		TSK_OTHER_CASES(159, "TSK_OTHER_CASES",
+				bundle.getString("BlackboardAttribute.tskOtherCases.text"),
+				TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING),;
 
 		private final int typeID;
 		private final String typeName;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties b/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties
index 08f80085abc599e1a553eece27e8e4a5c1119486..c0791a15b0540b9b8dda248f5828a2a024c9815a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties
+++ b/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties
@@ -64,6 +64,11 @@ BlackboardArtifact.tskUserDeviceEvent.text=User Device Events
 BlackboardArtifact.shortDescriptionDate.text=at {0}
 BlackboardArtifact.tskAssociatedObject.text=Associated Object
 BlackboardArtifact.tskWebCategorization.text=Web Categories
+BlackboardArtifact.tskPreviouslySeen.text=Previously Seen
+BlackboardArtifact.tskPreviouslyUnseen.text=Previously Unseen
+BlackboardArtifact.tskPreviouslyNotable.text=Previously Notable
+BlackboardArtifact.tskInterestingItem.text=Interesting Items
+BlackboardArtifact.tskMalware.text=Malware
 BlackboardArtifact.tskYaraHit.text=YARA Hit
 BlackboardArtifact.tskGPSArea.text=GPS Area
 BlackboardAttribute.tskAccountType.text=Account Type
@@ -216,6 +221,9 @@ BlackboardAttribute.tskRealm.text=Realm
 BlackboardAttribute.tskHost.text=Host
 BlackboardAttribute.tskHomeDir.text=Home Directory
 BlackboardAttribute.tskIsAdmin.text=Is Administrator
+BlackboardAttribute.tskCorrelationType.text=Correlation Type
+BlackboardAttribute.tskCorrelationValue.text=Correlation Value
+BlackboardAttribute.tskOtherCases.text=Other Cases
 AbstractFile.readLocal.exception.msg4.text=Error reading local file\: {0}
 AbstractFile.readLocal.exception.msg1.text=Error reading local file, local path is not set
 AbstractFile.readLocal.exception.msg2.text=Error reading local file, it does not exist at local path\: {0}
@@ -243,8 +251,10 @@ SleuthkitCase.addLocalFile.exception.msg1.text=Error adding local file\: {0}, pa
 SleuthkitCase.addLocalFile.exception.msg2.text=Error creating a local file, cannot get new id of the object, file name\: {0}
 SleuthkitCase.addLocalFile.exception.msg3.text=Error creating a derived file, file name\: {0}
 SleuthkitCase.getLastObjectId.exception.msg.text=Error closing result set after getting last object id.
+TskData.tskFsNameFlagEnum.unknown=Unknown
 TskData.tskFsNameFlagEnum.allocated=Allocated
 TskData.tskFsNameFlagEnum.unallocated=Unallocated
+TskData.tskFsMetaFlagEnum.unknown=Unknown
 TskData.tskFsMetaFlagEnum.allocated=Allocated
 TskData.tskFsMetaFlagEnum.unallocated=Unallocated
 TskData.tskFsMetaFlagEnum.used=Used
@@ -275,6 +285,8 @@ TskData.fileKnown.known=known
 TskData.fileKnown.knownBad=notable
 TskData.fileKnown.exception.msg1.text=No FileKnown of value\: {0}
 TskData.encodingType.exception.msg1.text=No EncodingType of value\: {0}
+TskData.collectedStatus.exception.msg1.text=No CollectedStatus of value\: {0}
+TskData.keywordSearchQueryType.exception.msg1.text=No KeywordSearchQueryType of value\: {0}
 TskData.tskDbFilesTypeEnum.exception.msg1.text=No TSK_FILE_TYPE_ENUM of value\: {0}
 TskData.objectTypeEnum.exception.msg1.text=No ObjectType of value\: {0}
 TskData.tskImgTypeEnum.exception.msg1.text=No TSK_IMG_TYPE_ENUM of value\: {0}
@@ -312,7 +324,10 @@ IngestJobInfo.IngestJobStatusType.Started.displayName=Started
 IngestJobInfo.IngestJobStatusType.Cancelled.displayName=Cancelled
 IngestJobInfo.IngestJobStatusType.Completed.displayName=Completed
 IngestModuleInfo.IngestModuleType.FileLevel.displayName=File Level
+IngestModuleInfo.IngestModuleType.DataArtifact.displayName=Data Artifact
+IngestModuleInfo.IngestModuleType.AnalysisResult.displayName=Analysis Result
 IngestModuleInfo.IngestModuleType.DataSourceLevel.displayName=Data Source Level
+IngestModuleInfo.IngestModuleType.Multiple.displayName=Multiple
 ReviewStatus.Approved=Approved
 ReviewStatus.Rejected=Rejected
 ReviewStatus.Undecided=Undecided
@@ -323,7 +338,7 @@ TimelineLevelOfDetail.medium=Medium
 TimelineLevelOfDetail.high=High
 BaseTypes.fileSystem.name=File System
 BaseTypes.webActivity.name=Web Activity
-BaseTypes.miscTypes.name=Miscellaneous
+BaseTypes.miscTypes.name=Other
 FileSystemTypes.fileModified.name=File Modified
 FileSystemTypes.fileAccessed.name=File Accessed
 FileSystemTypes.fileCreated.name=File Created
@@ -363,9 +378,9 @@ WebTypes.webFormAutoFill.name=Web Form Autofill Created
 WebTypes.webFormAddress.name=Web Form Address Created
 WebTypes.webFormAddressModified.name=Web Form Address Modified
 WebTypes.webFormAutofillAccessed.name=Web Form Autofill Accessed
-CustomTypes.other.name=Standard Types
-CustomTypes.userCreated.name=Custom Types
-BaseTypes.customTypes.name=Other
+CustomTypes.other.name=Standard Artifact Event
+CustomTypes.userCreated.name=Manually Created Event
+CustomTypes.customArtifact.name=Custom Artifact Event
 EventTypeHierarchyLevel.root=Root
 EventTypeHierarchyLevel.category=Category
 EventTypeHierarchyLevel.event=Event
@@ -383,15 +398,16 @@ OsAccountStatus.Unknown.text=Unknown
 OsAccountStatus.Active.text=Active
 OsAccountStatus.Disabled.text=Disabled
 OsAccountStatus.Deleted.text=Deleted
+OsAccountStatus.NonExistent.text=Non Existent
 OsAccountType.Unknown.text=Unknown
 OsAccountType.Service.text=Service
 OsAccountType.Interactive.text=Interactive
 OsAccountInstanceType.Launched.text=Launched
 OsAccountInstanceType.Accessed.text=Accessed
 OsAccountInstanceType.Referenced.text=Referenced
-OsAccountInstanceType.Launched.descr.text=Account owner launched a program action on the host.
-OsAccountInstanceType.Accessed.descr.text=Account owner accessed resources on the host for read/write via some service.
-OsAccountInstanceType.Referenced.descr.text=Account owner was referenced in a log file on the host.
+OsAccountInstanceType.Launched.descr.text=User launched a program or had an interactive session on the host.
+OsAccountInstanceType.Accessed.descr.text=User accessed resources on the host via a service or created a file on the host.
+OsAccountInstanceType.Referenced.descr.text=User was referenced on the host and it is unclear if they had any access. For example, if they are mentioned in a log file.
 OsAccountRealm.Known.text=Known
 OsAccountRealm.Inferred.text=Inferred
 OsAccountRealm.Unknown.text=Unknown
@@ -429,4 +445,15 @@ TimelineEventType.WebCache.text=Web Cache
 TimelineEventType.BluetoothAdapter.txt=Bluetooth Adapter
 BaseTypes.geolocation.name=Geolocation
 BaseTypes.communication.name=Communication
+TskData.ObjectType.IMG.name=Disk Image
+TskData.ObjectType.VS.name=Volume System
+TskData.ObjectType.VOL.name=Volume
+TskData.ObjectType.FS.name=File System
+TskData.ObjectType.AbstractFile.name=File
+TskData.ObjectType.Artifact.name=Artifact
+TskData.ObjectType.Report.name=Report
+TskData.ObjectType.Pool.name=Pool
+TskData.ObjectType.OsAccount.name=OS Account
+TskData.ObjectType.HostAddress.name=Host Address
+TskData.ObjectType.Unsupported.name=Unsupported
 
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties-MERGED b/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties-MERGED
index 08f80085abc599e1a553eece27e8e4a5c1119486..c0791a15b0540b9b8dda248f5828a2a024c9815a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties-MERGED
+++ b/bindings/java/src/org/sleuthkit/datamodel/Bundle.properties-MERGED
@@ -64,6 +64,11 @@ BlackboardArtifact.tskUserDeviceEvent.text=User Device Events
 BlackboardArtifact.shortDescriptionDate.text=at {0}
 BlackboardArtifact.tskAssociatedObject.text=Associated Object
 BlackboardArtifact.tskWebCategorization.text=Web Categories
+BlackboardArtifact.tskPreviouslySeen.text=Previously Seen
+BlackboardArtifact.tskPreviouslyUnseen.text=Previously Unseen
+BlackboardArtifact.tskPreviouslyNotable.text=Previously Notable
+BlackboardArtifact.tskInterestingItem.text=Interesting Items
+BlackboardArtifact.tskMalware.text=Malware
 BlackboardArtifact.tskYaraHit.text=YARA Hit
 BlackboardArtifact.tskGPSArea.text=GPS Area
 BlackboardAttribute.tskAccountType.text=Account Type
@@ -216,6 +221,9 @@ BlackboardAttribute.tskRealm.text=Realm
 BlackboardAttribute.tskHost.text=Host
 BlackboardAttribute.tskHomeDir.text=Home Directory
 BlackboardAttribute.tskIsAdmin.text=Is Administrator
+BlackboardAttribute.tskCorrelationType.text=Correlation Type
+BlackboardAttribute.tskCorrelationValue.text=Correlation Value
+BlackboardAttribute.tskOtherCases.text=Other Cases
 AbstractFile.readLocal.exception.msg4.text=Error reading local file\: {0}
 AbstractFile.readLocal.exception.msg1.text=Error reading local file, local path is not set
 AbstractFile.readLocal.exception.msg2.text=Error reading local file, it does not exist at local path\: {0}
@@ -243,8 +251,10 @@ SleuthkitCase.addLocalFile.exception.msg1.text=Error adding local file\: {0}, pa
 SleuthkitCase.addLocalFile.exception.msg2.text=Error creating a local file, cannot get new id of the object, file name\: {0}
 SleuthkitCase.addLocalFile.exception.msg3.text=Error creating a derived file, file name\: {0}
 SleuthkitCase.getLastObjectId.exception.msg.text=Error closing result set after getting last object id.
+TskData.tskFsNameFlagEnum.unknown=Unknown
 TskData.tskFsNameFlagEnum.allocated=Allocated
 TskData.tskFsNameFlagEnum.unallocated=Unallocated
+TskData.tskFsMetaFlagEnum.unknown=Unknown
 TskData.tskFsMetaFlagEnum.allocated=Allocated
 TskData.tskFsMetaFlagEnum.unallocated=Unallocated
 TskData.tskFsMetaFlagEnum.used=Used
@@ -275,6 +285,8 @@ TskData.fileKnown.known=known
 TskData.fileKnown.knownBad=notable
 TskData.fileKnown.exception.msg1.text=No FileKnown of value\: {0}
 TskData.encodingType.exception.msg1.text=No EncodingType of value\: {0}
+TskData.collectedStatus.exception.msg1.text=No CollectedStatus of value\: {0}
+TskData.keywordSearchQueryType.exception.msg1.text=No KeywordSearchQueryType of value\: {0}
 TskData.tskDbFilesTypeEnum.exception.msg1.text=No TSK_FILE_TYPE_ENUM of value\: {0}
 TskData.objectTypeEnum.exception.msg1.text=No ObjectType of value\: {0}
 TskData.tskImgTypeEnum.exception.msg1.text=No TSK_IMG_TYPE_ENUM of value\: {0}
@@ -312,7 +324,10 @@ IngestJobInfo.IngestJobStatusType.Started.displayName=Started
 IngestJobInfo.IngestJobStatusType.Cancelled.displayName=Cancelled
 IngestJobInfo.IngestJobStatusType.Completed.displayName=Completed
 IngestModuleInfo.IngestModuleType.FileLevel.displayName=File Level
+IngestModuleInfo.IngestModuleType.DataArtifact.displayName=Data Artifact
+IngestModuleInfo.IngestModuleType.AnalysisResult.displayName=Analysis Result
 IngestModuleInfo.IngestModuleType.DataSourceLevel.displayName=Data Source Level
+IngestModuleInfo.IngestModuleType.Multiple.displayName=Multiple
 ReviewStatus.Approved=Approved
 ReviewStatus.Rejected=Rejected
 ReviewStatus.Undecided=Undecided
@@ -323,7 +338,7 @@ TimelineLevelOfDetail.medium=Medium
 TimelineLevelOfDetail.high=High
 BaseTypes.fileSystem.name=File System
 BaseTypes.webActivity.name=Web Activity
-BaseTypes.miscTypes.name=Miscellaneous
+BaseTypes.miscTypes.name=Other
 FileSystemTypes.fileModified.name=File Modified
 FileSystemTypes.fileAccessed.name=File Accessed
 FileSystemTypes.fileCreated.name=File Created
@@ -363,9 +378,9 @@ WebTypes.webFormAutoFill.name=Web Form Autofill Created
 WebTypes.webFormAddress.name=Web Form Address Created
 WebTypes.webFormAddressModified.name=Web Form Address Modified
 WebTypes.webFormAutofillAccessed.name=Web Form Autofill Accessed
-CustomTypes.other.name=Standard Types
-CustomTypes.userCreated.name=Custom Types
-BaseTypes.customTypes.name=Other
+CustomTypes.other.name=Standard Artifact Event
+CustomTypes.userCreated.name=Manually Created Event
+CustomTypes.customArtifact.name=Custom Artifact Event
 EventTypeHierarchyLevel.root=Root
 EventTypeHierarchyLevel.category=Category
 EventTypeHierarchyLevel.event=Event
@@ -383,15 +398,16 @@ OsAccountStatus.Unknown.text=Unknown
 OsAccountStatus.Active.text=Active
 OsAccountStatus.Disabled.text=Disabled
 OsAccountStatus.Deleted.text=Deleted
+OsAccountStatus.NonExistent.text=Non Existent
 OsAccountType.Unknown.text=Unknown
 OsAccountType.Service.text=Service
 OsAccountType.Interactive.text=Interactive
 OsAccountInstanceType.Launched.text=Launched
 OsAccountInstanceType.Accessed.text=Accessed
 OsAccountInstanceType.Referenced.text=Referenced
-OsAccountInstanceType.Launched.descr.text=Account owner launched a program action on the host.
-OsAccountInstanceType.Accessed.descr.text=Account owner accessed resources on the host for read/write via some service.
-OsAccountInstanceType.Referenced.descr.text=Account owner was referenced in a log file on the host.
+OsAccountInstanceType.Launched.descr.text=User launched a program or had an interactive session on the host.
+OsAccountInstanceType.Accessed.descr.text=User accessed resources on the host via a service or created a file on the host.
+OsAccountInstanceType.Referenced.descr.text=User was referenced on the host and it is unclear if they had any access. For example, if they are mentioned in a log file.
 OsAccountRealm.Known.text=Known
 OsAccountRealm.Inferred.text=Inferred
 OsAccountRealm.Unknown.text=Unknown
@@ -429,4 +445,15 @@ TimelineEventType.WebCache.text=Web Cache
 TimelineEventType.BluetoothAdapter.txt=Bluetooth Adapter
 BaseTypes.geolocation.name=Geolocation
 BaseTypes.communication.name=Communication
+TskData.ObjectType.IMG.name=Disk Image
+TskData.ObjectType.VS.name=Volume System
+TskData.ObjectType.VOL.name=Volume
+TskData.ObjectType.FS.name=File System
+TskData.ObjectType.AbstractFile.name=File
+TskData.ObjectType.Artifact.name=Artifact
+TskData.ObjectType.Report.name=Report
+TskData.ObjectType.Pool.name=Pool
+TskData.ObjectType.OsAccount.name=OS Account
+TskData.ObjectType.HostAddress.name=Host Address
+TskData.ObjectType.Unsupported.name=Unsupported
 
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Bundle_ja.properties b/bindings/java/src/org/sleuthkit/datamodel/Bundle_ja.properties
index 164f2f3cfbc2e60e4f74f68826d777f6377cff4d..17e3b07892f5607730a5691418230d285af22a26 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Bundle_ja.properties
+++ b/bindings/java/src/org/sleuthkit/datamodel/Bundle_ja.properties
@@ -1,11 +1,10 @@
-#Thu Jul 01 12:01:30 UTC 2021
+#Thu Sep 30 10:23:46 UTC 2021
 AbstractFile.readLocal.exception.msg1.text=\u30ed\u30fc\u30ab\u30eb\u30d5\u30a1\u30a4\u30eb\u306e\u8aad\u307f\u53d6\u308a\u4e2d\u306b\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f\u3002\u30ed\u30fc\u30ab\u30eb\u30d1\u30b9\u304c\u30bb\u30c3\u30c8\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002
 AbstractFile.readLocal.exception.msg2.text=\u30ed\u30fc\u30ab\u30eb\u30d5\u30a1\u30a4\u30eb\u306e\u8aad\u307f\u53d6\u308a\u4e2d\u306b\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f\u3002\u4e0b\u8a18\u306e\u30ed\u30fc\u30ab\u30eb\u30d1\u30b9\u306b\u306f\u5b58\u5728\u3057\u307e\u305b\u3093\uff1a{0}
 AbstractFile.readLocal.exception.msg3.text=\u30ed\u30fc\u30ab\u30eb\u30d5\u30a1\u30a4\u30eb\u306e\u8aad\u307f\u53d6\u308a\u4e2d\u306b\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f\u3002\u4e0b\u8a18\u306e\u30ed\u30fc\u30ab\u30eb\u30d1\u30b9\u3067\u306f\u8aad\u307f\u53d6\u308a\u3067\u304d\u307e\u305b\u3093\uff1a{0}
 AbstractFile.readLocal.exception.msg4.text=\u30d5\u30a1\u30a4\u30eb{0}\u306e\u8aad\u307f\u53d6\u308a\u4e2d\u306b\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f
 AbstractFile.readLocal.exception.msg5.text=\u30ed\u30fc\u30ab\u30eb\u30d5\u30a1\u30a4\u30eb{0}\u3092\u8aad\u307f\u53d6\u308c\u307e\u305b\u3093
 BaseTypes.communication.name=\u30b3\u30df\u30e5\u30cb\u30b1\u30fc\u30b7\u30e7\u30f3
-BaseTypes.customTypes.name=\u305d\u306e\u4ed6
 BaseTypes.fileSystem.name=\u30d5\u30a1\u30a4\u30eb\u30b7\u30b9\u30c6\u30e0
 BaseTypes.geolocation.name=\u30b8\u30aa\u30ed\u30b1\u30fc\u30b7\u30e7\u30f3
 BaseTypes.miscTypes.name=\u305d\u306e\u4ed6
@@ -52,6 +51,9 @@ BlackboardArtifact.tskMetadataExif.text=EXIF\u30e1\u30bf\u30c7\u30fc\u30bf
 BlackboardArtifact.tskObjectDetected.text=\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u304c\u691c\u51fa\u3055\u308c\u307e\u3057\u305f
 BlackboardArtifact.tskOsAccount.text=\u30aa\u30da\u30ec\u30fc\u30c6\u30a3\u30f3\u30b0\u30b7\u30b9\u30c6\u30e0\u30e6\u30fc\u30b6\u30a2\u30ab\u30a6\u30f3\u30c8
 BlackboardArtifact.tskOsInfo.text=\u30aa\u30da\u30ec\u30fc\u30c6\u30a3\u30f3\u30b0\u30b7\u30b9\u30c6\u30e0\u60c5\u5831
+BlackboardArtifact.tskPreviouslyNotable.text=\u4ee5\u524d\u306b\u6ce8\u76ee\u306b\u5024\u3059\u308b
+BlackboardArtifact.tskPreviouslySeen.text=\u4ee5\u524d\u306b\u8a8d\u8b58
+BlackboardArtifact.tskPreviouslyUnseen.text=\u4ee5\u524d\u306b\u306f\u672a\u8a8d\u8b58
 BlackboardArtifact.tskProgNotifications.text=\u30d7\u30ed\u30b0\u30e9\u30e0\u901a\u77e5
 BlackboardArtifact.tskProgRun.text=\u5b9f\u884c\u30d7\u30ed\u30b0\u30e9\u30e0
 BlackboardArtifact.tskRemoteDrive.text=\u30ea\u30e2\u30fc\u30c8\u30c9\u30e9\u30a4\u30d6
@@ -96,6 +98,8 @@ BlackboardAttribute.tskCardType.text=\u30ab\u30fc\u30c9\u306e\u6709\u52b9\u671f\
 BlackboardAttribute.tskCategory.text=\u30ab\u30c6\u30b4\u30ea\u30fc
 BlackboardAttribute.tskCity.text=\u5e02
 BlackboardAttribute.tskComment.text=\u30b3\u30e1\u30f3\u30c8
+BlackboardAttribute.tskCorrelationType.text=\u76f8\u95a2\u30bf\u30a4\u30d7
+BlackboardAttribute.tskCorrelationValue.text=\u76f8\u95a2\u5024
 BlackboardAttribute.tskCount.text=\u30ab\u30a6\u30f3\u30c8
 BlackboardAttribute.tskCountry.text=\u56fd
 BlackboardAttribute.tskDateTimeAccessed.text=\u30a2\u30af\u30bb\u30b9\u65e5\u4ed8
@@ -174,6 +178,7 @@ BlackboardAttribute.tskMsgReplyId.text=\u30e1\u30c3\u30bb\u30fc\u30b8\u30ea\u30d
 BlackboardAttribute.tskName.text=\u540d\u524d
 BlackboardAttribute.tskNamePerson.text=\u4eba\u540d
 BlackboardAttribute.tskOrganization.text=\u7d44\u7e54
+BlackboardAttribute.tskOtherCases.text=\u305d\u306e\u4ed6\u306e\u30b1\u30fc\u30b9
 BlackboardAttribute.tskOwner.text=\u4fdd\u6709\u8005
 BlackboardAttribute.tskPassword.text=\u30d1\u30b9\u30ef\u30fc\u30c9
 BlackboardAttribute.tskPath.text=\u30d1\u30b9
@@ -236,8 +241,9 @@ BlackboardAttribute.tskrule.text=\u30eb\u30fc\u30eb
 BlackboardAttribute.tskthreadid.text=\u30b9\u30ec\u30c3\u30c9ID
 CategoryType.AnalysisResult=\u5206\u6790\u7d50\u679c
 CategoryType.DataArtifact=\u30c7\u30fc\u30bf\u30a2\u30fc\u30c6\u30a3\u30d5\u30a1\u30af\u30c8
-CustomTypes.other.name=\u6a19\u6e96\u30bf\u30a4\u30d7
-CustomTypes.userCreated.name=\u30ab\u30b9\u30bf\u30e0\u30bf\u30a4\u30d7
+CustomTypes.customArtifact.name=\u30ab\u30b9\u30bf\u30e0\u30fb\u30a2\u30fc\u30c6\u30a3\u30d5\u30a1\u30af\u30c8\u30fb\u30a4\u30d9\u30f3\u30c8
+CustomTypes.other.name=\u6a19\u6e96\u30a2\u30fc\u30c6\u30a3\u30d5\u30a1\u30af\u30c8\u30fb\u30a4\u30d9\u30f3\u30c8
+CustomTypes.userCreated.name=\u624b\u52d5\u3067\u4f5c\u6210\u3055\u308c\u305f\u30a4\u30d9\u30f3\u30c8
 DataSourcesFilter.displayName.text=\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u3092\u306b\u5236\u9650\u3059\u308b
 DatabaseConnectionCheck.Access=\u30e6\u30fc\u30b6\u30fc\u540d\u304b\u30d1\u30b9\u30ef\u30fc\u30c9\u304c\u7121\u52b9\u3067\u3059\u3002
 DatabaseConnectionCheck.Authentication=\u30e6\u30fc\u30b6\u30fc\u540d\u304b\u30d1\u30b9\u30ef\u30fc\u30c9\u304c\u7121\u52b9\u3067\u3059\u3002
@@ -267,9 +273,9 @@ EventTypeZoomLevel.baseType=\u30d9\u30fc\u30b9\u30bf\u30a4\u30d7
 EventTypeZoomLevel.rootType=\u30eb\u30fc\u30c8\u30bf\u30a4\u30d7
 EventTypeZoomLevel.subType=\u30b5\u30d6\u30bf\u30a4\u30d7
 FileSystemTypes.fileAccessed.name=\u30a2\u30af\u30bb\u30b9\u3055\u308c\u305f\u30d5\u30a1\u30a4\u30eb
-FileSystemTypes.fileChanged.name=\u30d5\u30a1\u30a4\u30eb\u304c\u5909\u66f4\u3055\u308c\u307e\u3057\u305f
-FileSystemTypes.fileCreated.name=\u30d5\u30a1\u30a4\u30eb\u4f5c\u6210
-FileSystemTypes.fileModified.name=\u30d5\u30a1\u30a4\u30eb\u304c\u5909\u66f4\u3055\u308c\u307e\u3057\u305f
+FileSystemTypes.fileChanged.name=\u5909\u66f4\u3055\u308c\u305f\u30d5\u30a1\u30a4\u30eb
+FileSystemTypes.fileCreated.name=\u4f5c\u6210\u3055\u308c\u305f\u30d5\u30a1\u30a4\u30eb
+FileSystemTypes.fileModified.name=\u4fee\u6b63\u3055\u308c\u305f\u30d5\u30a1\u30a4\u30eb
 FileTypesFilter.displayName.text=\u30d5\u30a1\u30a4\u30eb\u30bf\u30a4\u30d7\u3092\u5236\u9650
 FsContent.readInt.err.msg.text=\u753b\u50cf\u30d5\u30a1\u30a4\u30eb\u304c\u5b58\u5728\u3057\u306a\u3044\u304b\u3001\u30a2\u30af\u30bb\u30b9\u3067\u304d\u307e\u305b\u3093\u3002
 Image.verifyImageSize.errStr1.text=\u4e0d\u5b8c\u5168\u306a\u753b\u50cf\u306e\u53ef\u80fd\u6027\uff1a\u30aa\u30d5\u30bb\u30c3\u30c8{0}\u3067\u30dc\u30ea\u30e5\u30fc\u30e0\u306e\u8aad\u53d6\u308a\u30a8\u30e9\u30fc\u304c\u767a\u751f\u3057\u307e\u3057\u305f
@@ -279,8 +285,10 @@ Image.verifyImageSize.errStr4.text=\u4e0d\u5b8c\u5168\u306a\u753b\u50cf\u306e\u5
 IngestJobInfo.IngestJobStatusType.Cancelled.displayName=\u30ad\u30e3\u30f3\u30bb\u30eb
 IngestJobInfo.IngestJobStatusType.Completed.displayName=\u5b8c\u4e86
 IngestJobInfo.IngestJobStatusType.Started.displayName=\u958b\u59cb
+IngestModuleInfo.IngestModuleType.DataArtifact.displayName=\u30c7\u30fc\u30bf\u30a2\u30fc\u30c6\u30a3\u30d5\u30a1\u30af\u30c8
 IngestModuleInfo.IngestModuleType.DataSourceLevel.displayName=\u30c7\u30fc\u30bf\u30bd\u30fc\u30b9\u30ec\u30d9\u30eb
 IngestModuleInfo.IngestModuleType.FileLevel.displayName=\u30d5\u30a1\u30a4\u30eb\u30ec\u30d9\u30eb
+IngestModuleInfo.IngestModuleType.Multiple.displayName=\u591a\u6570
 IntersectionFilter.displayName.text=\u4ea4\u5dee\u70b9
 MiscTypes.Calls.name=\u901a\u8a71\u958b\u59cb
 MiscTypes.CallsEnd.name=\u901a\u8a71\u7d42\u4e86
@@ -316,6 +324,7 @@ OsAccountRealm.Local.text=\u30ed\u30fc\u30ab\u30eb
 OsAccountRealm.Unknown.text=\u4e0d\u660e
 OsAccountStatus.Active.text=\u30a2\u30af\u30c6\u30a3\u30d6
 OsAccountStatus.Deleted.text=\u524a\u9664\u6e08\u307f
+OsAccountStatus.NonExistent.text=\u5b58\u5728\u3057\u306a\u3044
 OsAccountStatus.Disabled.text=\u7121\u52b9
 OsAccountStatus.Unknown.text=\u4e0d\u660e
 OsAccountType.Interactive.text=\u30a4\u30f3\u30bf\u30e9\u30af\u30c6\u30a3\u30d6
@@ -383,6 +392,7 @@ TskData.fileKnown.unknown=\u4e0d\u660e
 TskData.objectTypeEnum.exception.msg1.text=\u30d0\u30ea\u30e5\u30fc\uff1a{0}\u306f\u30aa\u30d6\u30b8\u30a7\u30af\u30c8\u30bf\u30a4\u30d7\u306b\u8a72\u5f53\u3057\u307e\u305b\u3093
 TskData.tskDbFilesTypeEnum.exception.msg1.text=\u30d0\u30ea\u30e5\u30fc\uff1a{0}\u306fTSK_FILE_TYPE_ENUM\u306b\u8a72\u5f53\u3057\u307e\u305b\u3093
 TskData.tskFsAttrTypeEnum.exception.msg1.text=\u30d0\u30ea\u30e5\u30fc\uff1a{0}\u306fTSK_FS_TYPE_ENUM\u306b\u8a72\u5f53\u3057\u307e\u305b\u3093
+TskData.tskFsMetaFlagEnum.unknown=\u4e0d\u660e
 TskData.tskFsMetaFlagEnum.allocated=\u5272\u308a\u5f53\u3066\u6e08\u307f
 TskData.tskFsMetaFlagEnum.compressed=\u5727\u7e2e\u6e08\u307f
 TskData.tskFsMetaFlagEnum.orphan=\u30aa\u30fc\u30d5\u30a1\u30f3
@@ -390,6 +400,7 @@ TskData.tskFsMetaFlagEnum.unallocated=\u672a\u5272\u308a\u5f53\u3066
 TskData.tskFsMetaFlagEnum.unused=\u672a\u4f7f\u7528
 TskData.tskFsMetaFlagEnum.used=\u4f7f\u7528\u6e08\u307f
 TskData.tskFsMetaTypeEnum.exception.msg1.text=\u30d0\u30ea\u30e5\u30fc\uff1a{0}\u306fTSK_FS_META_TYPE_ENUM\u306b\u8a72\u5f53\u3057\u307e\u305b\u3093
+TskData.tskFsNameFlagEnum.unknown=\u4e0d\u660e
 TskData.tskFsNameFlagEnum.allocated=\u5272\u308a\u5f53\u3066\u6e08\u307f
 TskData.tskFsNameFlagEnum.exception.msg1.text=\u30d0\u30ea\u30e5\u30fc\uff1a{0}\u306fTSK_FS_NAME_FLAG_ENUM\u306b\u8a72\u5f53\u3057\u307e\u305b\u3093
 TskData.tskFsNameFlagEnum.unallocated=\u672a\u5272\u308a\u5f53\u3066
diff --git a/bindings/java/src/org/sleuthkit/datamodel/CaseDatabaseFactory.java b/bindings/java/src/org/sleuthkit/datamodel/CaseDatabaseFactory.java
index e6c08af852c8b49b830ef902e4d9cc853f9a4868..f1dc2cfbec3ed560c54fc2e750e3b78f1de2b714 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/CaseDatabaseFactory.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CaseDatabaseFactory.java
@@ -207,6 +207,8 @@ private void createFileTables(Statement stmt) throws SQLException {
 				+ "FOREIGN KEY(data_source_obj_id) REFERENCES data_source_info(obj_id) ON DELETE CASCADE, "
 				+ "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE)");
 
+		stmt.execute("CREATE TABLE file_collection_status_types (collection_status_type INTEGER PRIMARY KEY, name TEXT NOT NULL)");
+		
 		stmt.execute("CREATE TABLE tsk_files (obj_id " + dbQueryHelper.getPrimaryKey() + " PRIMARY KEY, "
 				+ "fs_obj_id " + dbQueryHelper.getBigIntType() + ", data_source_obj_id " + dbQueryHelper.getBigIntType() + " NOT NULL, "
 				+ "attr_type INTEGER, attr_id INTEGER, " 
@@ -215,15 +217,17 @@ private void createFileTables(Statement stmt) throws SQLException {
 				+ "dir_type INTEGER, meta_type INTEGER, dir_flags INTEGER, meta_flags INTEGER, size " + dbQueryHelper.getBigIntType() + ", "
 				+ "ctime " + dbQueryHelper.getBigIntType() + ", "
 				+ "crtime " + dbQueryHelper.getBigIntType() + ", atime " + dbQueryHelper.getBigIntType() + ", "
-				+ "mtime " + dbQueryHelper.getBigIntType() + ", mode INTEGER, uid INTEGER, gid INTEGER, md5 TEXT, sha256 TEXT, "
+				+ "mtime " + dbQueryHelper.getBigIntType() + ", mode INTEGER, uid INTEGER, gid INTEGER, md5 TEXT, sha256 TEXT, sha1 TEXT,"
 				+ "known INTEGER, "
 				+ "parent_path TEXT, mime_type TEXT, extension TEXT, "
 				+ "owner_uid TEXT DEFAULT NULL, "
 				+ "os_account_obj_id " + dbQueryHelper.getBigIntType() + " DEFAULT NULL, "
+				+ "collected INTEGER NOT NULL, "
 				+ "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE, "
 				+ "FOREIGN KEY(fs_obj_id) REFERENCES tsk_fs_info(obj_id) ON DELETE CASCADE, "
 				+ "FOREIGN KEY(data_source_obj_id) REFERENCES data_source_info(obj_id) ON DELETE CASCADE, "
-				+ "FOREIGN KEY(os_account_obj_id) REFERENCES tsk_os_accounts(os_account_obj_id) ON DELETE SET NULL) " ); 
+				+ "FOREIGN KEY(os_account_obj_id) REFERENCES tsk_os_accounts(os_account_obj_id) ON DELETE SET NULL, "
+				+ "FOREIGN KEY(collected) REFERENCES file_collection_status_types (collection_status_type))" ); 
 
 		stmt.execute("CREATE TABLE file_encoding_types (encoding_type INTEGER PRIMARY KEY, name TEXT NOT NULL)");
 
@@ -536,7 +540,7 @@ private void createAccountInstancesAndArtifacts(Statement stmt) throws SQLExcept
 				+ "os_account_obj_id " + dbQueryHelper.getBigIntType() + " NOT NULL, "
 				+ "data_source_obj_id " + dbQueryHelper.getBigIntType() + " NOT NULL, " 
 				+ "instance_type INTEGER NOT NULL, "	// PerformedActionOn/ReferencedOn
-				+ "UNIQUE(os_account_obj_id, data_source_obj_id), "
+				+ "UNIQUE(os_account_obj_id, data_source_obj_id, instance_type), "
 				+ "FOREIGN KEY(os_account_obj_id) REFERENCES tsk_os_accounts(os_account_obj_id) ON DELETE CASCADE, " 
 				+ "FOREIGN KEY(data_source_obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE ) ");
 		
diff --git a/bindings/java/src/org/sleuthkit/datamodel/CaseDbAccessManager.java b/bindings/java/src/org/sleuthkit/datamodel/CaseDbAccessManager.java
index 2736688a0455ef79eeced90359c2ac277ec37b2b..408efe08b860385eacdf97d56b668d5a542ef5f3 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/CaseDbAccessManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CaseDbAccessManager.java
@@ -18,10 +18,15 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.annotations.Beta;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.text.MessageFormat;
+import java.sql.Date;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
@@ -517,6 +522,54 @@ public long insertOrUpdate(final String tableName, final String sql, final CaseD
 		return rowId;
 	}
 	
+	/**
+	 * Creates a prepared statement object for the purposes of running an update
+	 * statement. The given SQL should not include the starting "UPDATE" 
+	 * or the name of the table.
+	 *
+	 * @param tableName The name of the table being updated.
+	 * @param sql       The update statement without the starting "UPDATE (table name)" part.
+	 * @param trans     The open transaction.
+	 *
+	 * @return The prepared statement object.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public CaseDbPreparedStatement prepareUpdate(String tableName, String sql, CaseDbTransaction trans) throws TskCoreException {
+		validateTableName(tableName);
+		validateSQL(sql);
+
+		String updateSQL = "UPDATE " + tableName + " " + sql; // NON-NLS
+	
+		try {
+			return new CaseDbPreparedStatement(StatementType.UPDATE, updateSQL, trans);
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error creating update prepared statement for query:\n" + updateSQL, ex);
+		}
+	}
+	
+	/**
+	 * Performs an update statement query with the given case prepared statement.
+	 *
+	 * @param preparedStatement The case prepared statement.
+	 * 
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public void update(CaseDbPreparedStatement preparedStatement) throws TskCoreException {
+		
+		if (!preparedStatement.getType().equals(StatementType.UPDATE)) {
+			throw new TskCoreException("CaseDbPreparedStatement has incorrect type for update operation");
+		}
+		
+		try {
+			preparedStatement.getStatement().executeUpdate();
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error updating row using sql: " + preparedStatement.getOriginalSql(), ex);
+		}
+	}	
+	
 	/**
 	 * Updates row(s) in the specified table.
 	 * 
@@ -602,6 +655,134 @@ public void select(final String sql, final CaseDbAccessQueryCallback queryCallba
 		}
 	}
 	
+	/**
+	 * Creates a prepared statement object for the purposes of running a select
+	 * statement.
+	 *
+	 * NOTE: Creating the CaseDbPreparedStatement opens a connection and
+	 * acquires a read lock on the case database. For this reason, it is
+	 * recommended to close the prepared statement as soon as it is no longer
+	 * needed, through either a try-with-resources block or calling close().
+	 * Additionally, calling other methods that access or update the database
+	 * should be avoided while the prepared statement is open to prevent
+	 * possible deadlocks.
+	 *
+	 * @param sql The select statement without the starting select keyword.
+	 *
+	 * @return The prepared statement object.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public CaseDbPreparedStatement prepareSelect(String sql) throws TskCoreException {
+		String selectSQL = "SELECT " + sql; // NON-NLS
+		try {
+			return new CaseDbPreparedStatement(StatementType.SELECT, selectSQL, false);
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error creating select prepared statement for query:\n" + selectSQL, ex);
+		}
+	}
+	
+	/**
+	 * Creates a prepared statement object for the purposes of running a select
+	 * statement. The given SQL should not include the starting "SELECT" keyword.
+	 *
+	 * @param sql       The select statement without the starting select keyword.
+	 * @param trans     The open transaction.
+	 *
+	 * @return The prepared statement object.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public CaseDbPreparedStatement prepareSelect(String sql, CaseDbTransaction trans) throws TskCoreException {
+		validateSQL(sql);
+
+		String selectSQL = "SELECT " + sql; // NON-NLS
+
+		try {
+			return new CaseDbPreparedStatement(StatementType.SELECT, selectSQL, trans);
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error creating select prepared statement for query:\n" + selectSQL, ex);
+		}
+	}
+
+
+	/**
+	 * Performs a select statement query with the given case prepared statement.
+	 *
+	 * @param preparedStatement The case prepared statement.
+	 * @param queryCallback     The callback to handle the result set.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public void select(CaseDbPreparedStatement preparedStatement, CaseDbAccessQueryCallback queryCallback) throws TskCoreException {
+		if (!preparedStatement.getType().equals(StatementType.SELECT)) {
+			throw new TskCoreException("CaseDbPreparedStatement has incorrect type for select operation");
+		}
+		
+		try (ResultSet resultSet = preparedStatement.getStatement().executeQuery()) {
+			queryCallback.process(resultSet);
+		} catch (SQLException ex) {
+			throw new TskCoreException(MessageFormat.format("Error running SELECT query:\n{0}", preparedStatement.getOriginalSql()), ex);
+		}
+	}
+	
+	/**
+	 * Creates a prepared statement object for the purposes of running an insert
+	 * statement. The given SQL should not include the starting "INSERT INTO" 
+	 * or the name of the table.
+	 * 
+	 * For PostgreSQL, the caller must include the ON CONFLICT DO NOTHING clause.
+	 *
+	 * @param tableName The name of the table being inserted into.
+	 * @param sql       The insert statement without the starting "INSERT INTO (table name)" part.
+	 * @param trans     The open transaction.
+	 *
+	 * @return The prepared statement object.
+	 *
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public CaseDbPreparedStatement prepareInsert(String tableName, String sql, CaseDbTransaction trans) throws TskCoreException {
+		validateTableName(tableName);
+		validateSQL(sql);
+		
+		String insertSQL = "INSERT";
+		if (DbType.SQLITE == tskDB.getDatabaseType()) {
+			insertSQL += " OR IGNORE";
+		}
+		insertSQL = insertSQL + " INTO " + tableName + " " + sql; // NON-NLS
+	
+		try {
+			return new CaseDbPreparedStatement(StatementType.INSERT, insertSQL, trans);
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error creating insert prepared statement for query:\n" + insertSQL, ex);
+		}
+	}
+	
+	/**
+	 * Performs an insert statement query with the given case prepared statement.
+	 *
+	 * @param preparedStatement The case prepared statement.
+	 * 
+	 * @throws TskCoreException
+	 */
+	@Beta
+	public void insert(CaseDbPreparedStatement preparedStatement) throws TskCoreException {
+		
+		if (!preparedStatement.getType().equals(StatementType.INSERT)) {
+			throw new TskCoreException("CaseDbPreparedStatement has incorrect type for insert operation");
+		}
+		
+		try {
+			preparedStatement.getStatement().executeUpdate();
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error inserting row using sql: " + preparedStatement.getOriginalSql(), ex);
+		}
+	}
+
 	/**
 	 * Deletes a row in the specified table.
 	 * 
@@ -675,5 +856,309 @@ private void validateSQL(String sql) throws TskCoreException {
 		 * TODO (JIRA-5950): Need SQL injection defense in CaseDbAccessManager 
 		 */
 	}
+	
+	/**
+	 * Enum to track which type of lock the CaseDbPreparedStatement holds.
+	 */
+	private enum LockType {
+		READ,
+		WRITE,
+		NONE;
+	}
+	
+	/**
+	 * Enum to track which type of statement the CaseDbPreparedStatement holds.
+	 */
+	private enum StatementType {
+		SELECT,
+		INSERT,
+		UPDATE;
+	}
+	
+	/**
+	 * A wrapper around a PreparedStatement to execute queries against the
+	 * database.
+	 */
+	@Beta
+	public class CaseDbPreparedStatement implements AutoCloseable {
+
+		private final CaseDbConnection connection;
+		private final PreparedStatement preparedStatement;
+		private final String originalSql;
+		private final LockType lockType;
+		private final StatementType type;
+		
+		/**
+		 * Construct a prepared statement. This should not be used if a transaction
+		 * is already open.
+		 *
+		 * NOTE: Creating the CaseDbPreparedStatement opens a connection and
+		 * acquires a read lock on the case database. For this reason, it is
+		 * recommended to close the prepared statement as soon as it is no
+		 * longer needed, through either a try-with-resources block or calling
+		 * close(). Additionally, calling other methods that access or update
+		 * the database should be avoided while the prepared statement is open
+		 * to prevent possible deadlocks.
+		 *
+		 * @param type                The type of statement.
+		 * @param query               The query string.
+		 * @param isWriteLockRequired Whether or not a write lock is required.
+		 *                            If a write lock is not required, just a
+		 *                            read lock is acquired.
+		 *
+		 * @throws SQLException
+		 * @throws TskCoreException
+		 */
+		private CaseDbPreparedStatement(StatementType type, String query, boolean isWriteLockRequired) throws SQLException, TskCoreException {		
+			if (isWriteLockRequired) {
+				CaseDbAccessManager.this.tskDB.acquireSingleUserCaseWriteLock();
+				this.lockType = LockType.WRITE;
+			} else {
+				CaseDbAccessManager.this.tskDB.acquireSingleUserCaseReadLock();
+				this.lockType = LockType.READ;
+			}
+			this.connection = tskDB.getConnection();
+			this.preparedStatement = connection.getPreparedStatement(query, Statement.NO_GENERATED_KEYS);
+			this.originalSql = query;
+			this.type = type;
+		}
+		
+		/**
+		 * Construct a prepared statement using an already open transaction.
+		 *
+		 * @param type                The type of statement.
+		 * @param query               The query string.
+		 * @param trans               The open transaction.
+		 *
+		 * @throws SQLException
+		 * @throws TskCoreException
+		 */
+		private CaseDbPreparedStatement(StatementType type, String query, CaseDbTransaction trans) throws SQLException, TskCoreException {		
+			this.lockType = LockType.NONE;
+			this.connection = trans.getConnection();
+			this.preparedStatement = connection.getPreparedStatement(query, Statement.NO_GENERATED_KEYS);
+			this.originalSql = query;
+			this.type = type;
+		}
+
+		/**
+		 * Returns the delegate prepared statement.
+		 *
+		 * @return The delegate prepared statement.
+		 */
+		private PreparedStatement getStatement() {
+			return preparedStatement;
+		}
+		
+		/**
+		 * Get the type of statement.
+		 * 
+		 * @return The statement type (select, insert, or update).
+		 */
+		private StatementType getType() {
+			return type;
+		}
+
+		/**
+		 * Returns the original sql query.
+		 *
+		 * @return The original sql query.
+		 */
+		private String getOriginalSql() {
+			return originalSql;
+		}
+		
+		/**
+		 * Resets the parameters in the prepared statement.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void reset() throws TskCoreException {
+			try {
+				preparedStatement.clearParameters();
+			} catch (SQLException ex) {
+				throw new TskCoreException("An error occurred while clearing parameters.", ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setBoolean(int parameterIndex, boolean x) throws TskCoreException {
+			try {
+				preparedStatement.setBoolean(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setByte(int parameterIndex, byte x) throws TskCoreException {
+			try {
+				preparedStatement.setByte(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setInt(int parameterIndex, int x) throws TskCoreException {
+			try {
+				preparedStatement.setInt(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setLong(int parameterIndex, long x) throws TskCoreException {
+			try {
+				preparedStatement.setLong(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setDouble(int parameterIndex, double x) throws TskCoreException {
+			try {
+				preparedStatement.setDouble(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setString(int parameterIndex, String x) throws TskCoreException {
+			try {
+				preparedStatement.setString(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setDate(int parameterIndex, Date x) throws TskCoreException {
+			try {
+				preparedStatement.setDate(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setTime(int parameterIndex, Time x) throws TskCoreException {
+			try {
+				preparedStatement.setTime(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setTimestamp(int parameterIndex, Timestamp x) throws TskCoreException {
+			try {
+				preparedStatement.setTimestamp(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		/**
+		 * Sets the value at the given parameter index to the given value. The
+		 * sql type is determined in the same manner as
+		 * java.sql.PreparedStatement.setObject.
+		 *
+		 * @param parameterIndex The index.
+		 * @param x              The value to set at that index.
+		 *
+		 * @throws TskCoreException
+		 */
+		public void setObject(int parameterIndex, Object x) throws TskCoreException {
+			try {
+				preparedStatement.setObject(parameterIndex, x);
+			} catch (SQLException ex) {
+				throw new TskCoreException(MessageFormat.format("There was an error setting the value at index: {0} to {1}", parameterIndex, x), ex);
+			}
+		}
+
+		@Override
+		public void close() throws SQLException {
+			
+			// Don't close the statement/connection or release a lock if we were supplied a transaction.
+			// Everything will be handled when the transaction is closed.
+			if (lockType.equals(LockType.NONE)) {
+				return;
+			}
+			
+			connection.close();
+			if (lockType.equals(LockType.WRITE)) {
+				CaseDbAccessManager.this.tskDB.releaseSingleUserCaseWriteLock();
+			} else {
+				CaseDbAccessManager.this.tskDB.releaseSingleUserCaseReadLock();
+			}
+		}
+	}
 
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/StringUtils.java b/bindings/java/src/org/sleuthkit/datamodel/CommManagerSqlStringUtils.java
similarity index 77%
rename from bindings/java/src/org/sleuthkit/datamodel/StringUtils.java
rename to bindings/java/src/org/sleuthkit/datamodel/CommManagerSqlStringUtils.java
index 5580cc5a6b666ab1e62aebf94f93ad1f435a616c..e16174db48521f1290befb20c442b9eae42f9789 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/StringUtils.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CommManagerSqlStringUtils.java
@@ -19,17 +19,22 @@
 package org.sleuthkit.datamodel;
 
 import java.util.Collection;
+import java.util.Collections;
 
 /**
- * Collection of string utility methods.
+ * Collection of string utility methods for use by CVT, CommunicationsManager
+ * and Timeline.
  */
-final class StringUtils {
+final class CommManagerSqlStringUtils {
 
-	private StringUtils() {
+	private CommManagerSqlStringUtils() {
 	}
 
 	/**
 	 * Utility method to convert a list to an CSV string.
+	 * 
+	 * Null entries in the values collection will be removed before
+	 * the string is created.
 	 *
 	 * @param values - collection of objects .
 	 *
@@ -41,7 +46,8 @@ static <T> String buildCSVString(Collection<T> values) {
 
 	/**
 	 * Utility method to join a collection into a string using a supplied
-	 * separator.
+	 * separator. Null entries in the values collection will be removed before
+	 * the string is created.
 	 *
 	 * @param <T>       The type of the values in the collection to be joined
 	 * @param values    The collection to be joined
@@ -54,11 +60,9 @@ static <T> String joinAsStrings(Collection<T> values, String separator) {
 		if (values == null || values.isEmpty()) {
 			return "";
 		}
+		
+		values.removeAll(Collections.singleton(null));
+		
 		return org.apache.commons.lang3.StringUtils.join(values, separator);
 	}
-
-	static String deleteWhitespace(String result) {
-		return org.apache.commons.lang3.StringUtils.deleteWhitespace(result);
-	}
-
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsFilter.java b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsFilter.java
index fc8c09e06bfccea6f3be15090ef79f93191f86a0..5542448d185d199ebc57ccc37a3420635eb67c1a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsFilter.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsFilter.java
@@ -133,7 +133,7 @@ public String getSQL(CommunicationsManager commsManager) {
 				relationShipTypeIds.add(relType.getTypeID());
 			}
 			return " relationships.relationship_type IN ( "
-					+ StringUtils.buildCSVString(relationShipTypeIds) + " )";
+					+ CommManagerSqlStringUtils.buildCSVString(relationShipTypeIds) + " )";
 		}
 	}
 
@@ -255,11 +255,11 @@ public String getSQL(CommunicationsManager commsManager) {
 				return "";
 			}
 
-			List<Integer> type_ids = new ArrayList<Integer>();
+			List<Integer> type_ids = new ArrayList<>();
 			for (Account.Type accountType : accountTypes) {
 				type_ids.add(commsManager.getAccountTypeId(accountType));
 			}
-			String account_type_ids_list = StringUtils.buildCSVString(type_ids);
+			String account_type_ids_list = CommManagerSqlStringUtils.buildCSVString(type_ids);
 			return " account_types.account_type_id IN ( " + account_type_ids_list + " )";
 		}
 	}
@@ -317,7 +317,7 @@ public String getSQL(CommunicationsManager commsManager) {
 					Logger.getLogger(DeviceFilter.class.getName()).log(Level.WARNING, "failed to get datasource object ids for deviceId", ex);
 				}
 			}
-			String datasource_obj_ids_list = StringUtils.buildCSVString(ds_ids);
+			String datasource_obj_ids_list = CommManagerSqlStringUtils.buildCSVString(ds_ids);
 			if (!datasource_obj_ids_list.isEmpty()) {
 				sql = " relationships.data_source_obj_id IN ( " + datasource_obj_ids_list + " )";
 			}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
index a5ff593837cc7fa8ce7b8166cef788c4786a70ab..79dff6466a375250897957a7801d167e0564fda0 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
@@ -62,7 +62,7 @@ public final class CommunicationsManager {
 			BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT.getTypeID(),
 			BlackboardArtifact.ARTIFACT_TYPE.TSK_CALLLOG.getTypeID()
 	));
-	private static final String RELATIONSHIP_ARTIFACT_TYPE_IDS_CSV_STR = StringUtils.buildCSVString(RELATIONSHIP_ARTIFACT_TYPE_IDS);
+	private static final String RELATIONSHIP_ARTIFACT_TYPE_IDS_CSV_STR = CommManagerSqlStringUtils.buildCSVString(RELATIONSHIP_ARTIFACT_TYPE_IDS);
 
 	/**
 	 * Construct a CommunicationsManager for the given SleuthkitCase.
@@ -86,7 +86,7 @@ public final class CommunicationsManager {
 	private void initAccountTypes() throws TskCoreException {
 		db.acquireSingleUserCaseWriteLock();
 		try (CaseDbConnection connection = db.getConnection();
-			Statement statement = connection.createStatement();) {
+				Statement statement = connection.createStatement();) {
 			// Read the table
 			int count = readAccountTypes();
 			if (0 == count) {
@@ -133,7 +133,7 @@ private int readAccountTypes() throws TskCoreException {
 		Statement statement = null;
 		ResultSet resultSet = null;
 		int count = 0;
-		
+
 		db.acquireSingleUserCaseReadLock();
 		try {
 			connection = db.getConnection();
@@ -240,23 +240,28 @@ public org.sleuthkit.datamodel.Account.Type addAccountType(String accountTypeNam
 	/**
 	 * Records that an account was used in a specific file. Behind the scenes,
 	 * it will create a case-specific Account object if it does not already
-	 * exist and create the needed database entries (which currently include
-	 * making a BlackboardArtifact).
-	 *
-	 * @param accountType     account type
-	 * @param accountUniqueID unique account identifier (such as email address)
-	 * @param moduleName      module creating the account
-	 * @param sourceFile      source file the account was found in (for the
-	 *                        blackboard)
-	 *
-	 * @return AccountFileInstance
-	 *
-	 * @throws TskCoreException          If a critical error occurs within TSK
-	 *                                   core
-	 * @throws InvalidAccountIDException If the account identifier is not valid.
+	 * exist, and it will create the needed database entries (which currently
+	 * includes making a TSK_ACCOUNT data artifact).
+	 *
+	 * @param accountType     The account type.
+	 * @param accountUniqueID The unique account identifier (such as an email
+	 *                        address).
+	 * @param moduleName      The module creating the account.
+	 * @param sourceFile      The source file the account was found in.
+	 * @param attributes      List of blackboard attributes to add to the data artifact (may be empty or null).
+	 * @param ingestJobId     The ingest job in which the analysis that found
+	 *                        the account was performed, may be null.
+	 *
+	 * @return	An AccountFileInstance object.
+	 *
+	 * @throws TskCoreException          The exception is thrown if there is an
+	 *                                   issue updating the case database.
+	 * @throws InvalidAccountIDException The exception is thrown if the account
+	 *                                   ID is not valid for the account type.
 	 */
 	// NOTE: Full name given for Type for doxygen linking
-	public AccountFileInstance createAccountFileInstance(org.sleuthkit.datamodel.Account.Type accountType, String accountUniqueID, String moduleName, Content sourceFile) throws TskCoreException, InvalidAccountIDException {
+	public AccountFileInstance createAccountFileInstance(org.sleuthkit.datamodel.Account.Type accountType, String accountUniqueID, 
+			String moduleName, Content sourceFile, List<BlackboardAttribute> attributes, Long ingestJobId) throws TskCoreException, InvalidAccountIDException {
 
 		// make or get the Account (unique at the case-level)
 		Account account = getOrCreateAccount(accountType, normalizeAccountID(accountType, accountUniqueID));
@@ -267,7 +272,7 @@ public AccountFileInstance createAccountFileInstance(org.sleuthkit.datamodel.Acc
 		 * address multiple times. Only one artifact is created for each email
 		 * message in that PST.
 		 */
-		BlackboardArtifact accountArtifact = getOrCreateAccountFileInstanceArtifact(accountType, normalizeAccountID(accountType, accountUniqueID), moduleName, sourceFile);
+		BlackboardArtifact accountArtifact = getOrCreateAccountFileInstanceArtifact(accountType, normalizeAccountID(accountType, accountUniqueID), moduleName, sourceFile, attributes, ingestJobId);
 
 		// The account instance map was unused so we have removed it from the database, 
 		// but we expect we may need it so I am preserving this method comment and usage here.
@@ -278,6 +283,35 @@ public AccountFileInstance createAccountFileInstance(org.sleuthkit.datamodel.Acc
 		return new AccountFileInstance(accountArtifact, account);
 	}
 
+	/**
+	 * Records that an account was used in a specific file. Behind the scenes,
+	 * it will create a case-specific Account object if it does not already
+	 * exist, and it will create the needed database entries (which currently
+	 * includes making a TSK_ACCOUNT data artifact).
+	 *
+	 * @param accountType     The account type.
+	 * @param accountUniqueID The unique account identifier (such as an email
+	 *                        address).
+	 * @param moduleName      The module creating the account.
+	 * @param sourceFile      The source file the account was found in.
+	 *
+	 * @return	An AccountFileInstance object.
+	 *
+	 * @throws TskCoreException          The exception is thrown if there is an
+	 *                                   issue updating the case database.
+	 * @throws InvalidAccountIDException The exception is thrown if the account
+	 *                                   ID is not valid for the account type.
+	 * @deprecated Use
+	 * createAccountFileInstance(org.sleuthkit.datamodel.Account.Type
+	 * accountType, String accountUniqueID, String moduleName, Content
+	 * sourceFile, List<BlackboardAttribute> attributes, Long ingestJobId) instead.
+	 */
+	@Deprecated
+	// NOTE: Full name given for Type for doxygen linking
+	public AccountFileInstance createAccountFileInstance(org.sleuthkit.datamodel.Account.Type accountType, String accountUniqueID, String moduleName, Content sourceFile) throws TskCoreException, InvalidAccountIDException {
+		return createAccountFileInstance(accountType, accountUniqueID, moduleName, sourceFile, null, null);
+	}
+
 	/**
 	 * Get the Account with the given account type and account ID.
 	 *
@@ -296,9 +330,9 @@ public Account getAccount(org.sleuthkit.datamodel.Account.Type accountType, Stri
 		Account account = null;
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
-			Statement s = connection.createStatement();
-			ResultSet rs = connection.executeQuery(s, "SELECT * FROM accounts WHERE account_type_id = " + getAccountTypeId(accountType)
-					+ " AND account_unique_identifier = '" + normalizeAccountID(accountType, accountUniqueID) + "'");) { //NON-NLS
+				Statement s = connection.createStatement();
+				ResultSet rs = connection.executeQuery(s, "SELECT * FROM accounts WHERE account_type_id = " + getAccountTypeId(accountType)
+						+ " AND account_unique_identifier = '" + normalizeAccountID(accountType, accountUniqueID) + "'");) { //NON-NLS
 
 			if (rs.next()) {
 				account = new Account(rs.getInt("account_id"), accountType,
@@ -482,28 +516,34 @@ private Account getOrCreateAccount(Account.Type accountType, String accountUniqu
 	 * @param moduleName      The name of the module that found the account
 	 *                        instance.
 	 * @param sourceFile      The file in which the account instance was found.
+	 * @param originalAttrs   List of blackboard attributes to add to the data artifact (may be empty or null).
+	 * @param ingestJobId     The ingest job in which the analysis that found
+	 *                        the account was performed, may be null.
 	 *
 	 * @return The account artifact.
 	 *
 	 * @throws TskCoreException If there is an error querying or updating the
 	 *                          case database.
 	 */
-	private BlackboardArtifact getOrCreateAccountFileInstanceArtifact(Account.Type accountType, String accountUniqueID, String moduleName, Content sourceFile) throws TskCoreException {
+	private BlackboardArtifact getOrCreateAccountFileInstanceArtifact(Account.Type accountType, String accountUniqueID, String moduleName, 
+			Content sourceFile, List<BlackboardAttribute> originalAttrs, Long ingestJobId) throws TskCoreException {
 		if (sourceFile == null) {
 			throw new TskCoreException("Source file not provided.");
 		}
 
 		BlackboardArtifact accountArtifact = getAccountFileInstanceArtifact(accountType, accountUniqueID, sourceFile);
 		if (accountArtifact == null) {
-			List<BlackboardAttribute> attributes = Arrays.asList(
-					new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ACCOUNT_TYPE, moduleName, accountType.getTypeName()),
-					new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID, moduleName, accountUniqueID)
-			);
+			List<BlackboardAttribute> attributes = new ArrayList<>();
+			attributes.add(new BlackboardAttribute(BlackboardAttribute.Type.TSK_ACCOUNT_TYPE, moduleName, accountType.getTypeName()));
+			attributes.add(new BlackboardAttribute(BlackboardAttribute.Type.TSK_ID, moduleName, accountUniqueID));
+			if (originalAttrs != null) {
+				attributes.addAll(originalAttrs);
+			}
 
 			accountArtifact = sourceFile.newDataArtifact(ACCOUNT_TYPE, attributes);
-			
+
 			try {
-				db.getBlackboard().postArtifact(accountArtifact, moduleName);
+				db.getBlackboard().postArtifact(accountArtifact, moduleName, ingestJobId);
 			} catch (BlackboardException ex) {
 				LOGGER.log(Level.SEVERE, String.format("Error posting new account artifact to the blackboard (object ID = %d)", accountArtifact.getId()), ex);
 			}
@@ -526,36 +566,38 @@ private BlackboardArtifact getOrCreateAccountFileInstanceArtifact(Account.Type a
 	 */
 	private BlackboardArtifact getAccountFileInstanceArtifact(Account.Type accountType, String accountUniqueID, Content sourceFile) throws TskCoreException {
 		BlackboardArtifact accountArtifact = null;
-		
+
 		String queryStr = "SELECT artifacts.artifact_id AS artifact_id,"
-			+ " artifacts.obj_id AS obj_id,"
-			+ " artifacts.artifact_obj_id AS artifact_obj_id,"
-			+ " artifacts.data_source_obj_id AS data_source_obj_id,"
-			+ " artifacts.artifact_type_id AS artifact_type_id,"
-			+ " artifacts.review_status_id AS review_status_id"
-			+ " FROM blackboard_artifacts AS artifacts"
-			+ "	JOIN blackboard_attributes AS attr_account_type"
-			+ "		ON artifacts.artifact_id = attr_account_type.artifact_id"
-			+ " JOIN blackboard_attributes AS attr_account_id"
-			+ "		ON artifacts.artifact_id = attr_account_id.artifact_id"
-			+ "		AND attr_account_id.attribute_type_id = " + BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID.getTypeID()
-			+ "	    AND attr_account_id.value_text = '" + accountUniqueID + "'"
-			+ " WHERE artifacts.artifact_type_id = " + BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()
-			+ " AND attr_account_type.attribute_type_id = " + BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ACCOUNT_TYPE.getTypeID()
-			+ " AND attr_account_type.value_text = '" + accountType.getTypeName() + "'"
-			+ " AND artifacts.obj_id = " + sourceFile.getId(); //NON-NLS
-		
+				+ " artifacts.obj_id AS obj_id,"
+				+ " artifacts.artifact_obj_id AS artifact_obj_id,"
+				+ " artifacts.data_source_obj_id AS data_source_obj_id,"
+				+ " artifacts.artifact_type_id AS artifact_type_id,"
+				+ " artifacts.review_status_id AS review_status_id,"
+				+ " tsk_data_artifacts.os_account_obj_id AS os_account_obj_id"
+				+ " FROM blackboard_artifacts AS artifacts"
+				+ "	JOIN blackboard_attributes AS attr_account_type"
+				+ "		ON artifacts.artifact_id = attr_account_type.artifact_id"
+				+ " JOIN blackboard_attributes AS attr_account_id"
+				+ "		ON artifacts.artifact_id = attr_account_id.artifact_id"
+				+ "		AND attr_account_id.attribute_type_id = " + BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID.getTypeID()
+				+ "	    AND attr_account_id.value_text = '" + accountUniqueID + "'"
+				+ " LEFT JOIN tsk_data_artifacts ON tsk_data_artifacts.artifact_obj_id = artifacts.artifact_obj_id"
+				+ " WHERE artifacts.artifact_type_id = " + BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()
+				+ " AND attr_account_type.attribute_type_id = " + BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ACCOUNT_TYPE.getTypeID()
+				+ " AND attr_account_type.value_text = '" + accountType.getTypeName() + "'"
+				+ " AND artifacts.obj_id = " + sourceFile.getId(); //NON-NLS
+
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
-			Statement s = connection.createStatement();
-			ResultSet rs = connection.executeQuery(s, queryStr);) { //NON-NLS
+				Statement s = connection.createStatement();
+				ResultSet rs = connection.executeQuery(s, queryStr);) { //NON-NLS
 			if (rs.next()) {
-				BlackboardArtifact.Type bbartType = db.getArtifactType(rs.getInt("artifact_type_id"));
+				BlackboardArtifact.Type bbartType = db.getBlackboard().getArtifactType(rs.getInt("artifact_type_id"));
 
-				accountArtifact = new BlackboardArtifact(db, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
+				accountArtifact = new DataArtifact(db, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
 						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
 						bbartType.getTypeID(), bbartType.getTypeName(), bbartType.getDisplayName(),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")));
+						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")), rs.getLong("os_account_obj_id"), false);
 			}
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting account", ex);
@@ -583,8 +625,8 @@ public org.sleuthkit.datamodel.Account.Type getAccountType(String accountTypeNam
 
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
-			Statement s = connection.createStatement();
-			ResultSet rs = connection.executeQuery(s, "SELECT account_type_id, type_name, display_name FROM account_types WHERE type_name = '" + accountTypeName + "'");) { //NON-NLS
+				Statement s = connection.createStatement();
+				ResultSet rs = connection.executeQuery(s, "SELECT account_type_id, type_name, display_name FROM account_types WHERE type_name = '" + accountTypeName + "'");) { //NON-NLS
 			Account.Type accountType = null;
 			if (rs.next()) {
 				accountType = new Account.Type(accountTypeName, rs.getString("display_name"));
@@ -738,8 +780,8 @@ public Map<AccountPair, Long> getRelationshipCountsPairwise(Set<AccountDeviceIns
 				CommunicationsFilter.RelationshipTypeFilter.class.getName()
 		));
 
-		String accountIDsCSL = StringUtils.buildCSVString(accountIDs);
-		String accountDeviceIDsCSL = StringUtils.buildCSVString(accountDeviceIDs);
+		String accountIDsCSL = CommManagerSqlStringUtils.buildCSVString(accountIDs);
+		String accountDeviceIDsCSL = CommManagerSqlStringUtils.buildCSVString(accountDeviceIDs);
 		String filterSQL = getCommunicationsFilterSQL(filter, applicableFilters);
 
 		final String queryString
@@ -779,13 +821,13 @@ public Map<AccountPair, Long> getRelationshipCountsPairwise(Set<AccountDeviceIns
 				+ "		accounts2.account_id, "
 				+ "		account_types2.type_name, "
 				+ "		account_types2.display_name";
-		
+
 		Map<AccountPair, Long> results = new HashMap<AccountPair, Long>();
-		
+
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
-			Statement s = connection.createStatement();
-			ResultSet rs = connection.executeQuery(s, queryString);) { //NON-NLS
+				Statement s = connection.createStatement();
+				ResultSet rs = connection.executeQuery(s, queryString);) { //NON-NLS
 
 			while (rs.next()) {
 				//make account 1
@@ -838,7 +880,7 @@ public long getRelationshipSourcesCount(AccountDeviceInstance accountDeviceInsta
 		long account_id = accountDeviceInstance.getAccount().getAccountID();
 
 		// Get the list of Data source objects IDs correpsonding to this DeviceID.
-		String datasourceObjIdsCSV = StringUtils.buildCSVString(
+		String datasourceObjIdsCSV = CommManagerSqlStringUtils.buildCSVString(
 				db.getDataSourceObjIds(accountDeviceInstance.getDeviceId()));
 
 		// set up applicable filters
@@ -856,13 +898,13 @@ public long getRelationshipSourcesCount(AccountDeviceInstance accountDeviceInsta
 		}
 
 		String queryStr
-			= "SELECT count(DISTINCT relationships.relationship_source_obj_id) as count "
-			+ "	FROM" + innerQuery
-			+ " WHERE relationships.data_source_obj_id IN ( " + datasourceObjIdsCSV + " )"
-			+ " AND ( relationships.account1_id = " + account_id
-			+ "      OR  relationships.account2_id = " + account_id + " )"
-			+ (filterSQL.isEmpty() ? "" : " AND " + filterSQL);
-		
+				= "SELECT count(DISTINCT relationships.relationship_source_obj_id) as count "
+				+ "	FROM" + innerQuery
+				+ " WHERE relationships.data_source_obj_id IN ( " + datasourceObjIdsCSV + " )"
+				+ " AND ( relationships.account1_id = " + account_id
+				+ "      OR  relationships.account2_id = " + account_id + " )"
+				+ (filterSQL.isEmpty() ? "" : " AND " + filterSQL);
+
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
 				Statement s = connection.createStatement();
@@ -895,11 +937,13 @@ public long getRelationshipSourcesCount(AccountDeviceInstance accountDeviceInsta
 	public Set<Content> getRelationshipSources(Set<AccountDeviceInstance> accountDeviceInstanceList, CommunicationsFilter filter) throws TskCoreException {
 
 		if (accountDeviceInstanceList.isEmpty()) {
-			//log this?
+			LOGGER.log(Level.WARNING, "Empty accountDeviceInstanceList passed to getRelationshipSources");
 			return Collections.emptySet();
 		}
 
-		Map<Long, Set<Long>> accountIdToDatasourceObjIdMap = new HashMap<Long, Set<Long>>();
+		// Build a map of account ids to data source object ids.  For each
+		// account id there will be a set of data source object ids.
+		Map<Long, Set<Long>> accountIdToDatasourceObjIdMap = new HashMap<>();
 		for (AccountDeviceInstance accountDeviceInstance : accountDeviceInstanceList) {
 			long accountID = accountDeviceInstance.getAccount().getAccountID();
 			List<Long> dataSourceObjIds = db.getDataSourceObjIds(accountDeviceInstance.getDeviceId());
@@ -907,25 +951,32 @@ public Set<Content> getRelationshipSources(Set<AccountDeviceInstance> accountDev
 			if (accountIdToDatasourceObjIdMap.containsKey(accountID)) {
 				accountIdToDatasourceObjIdMap.get(accountID).addAll(dataSourceObjIds);
 			} else {
-				accountIdToDatasourceObjIdMap.put(accountID, new HashSet<Long>(dataSourceObjIds));
+				accountIdToDatasourceObjIdMap.put(accountID, new HashSet<>(dataSourceObjIds));
 			}
 		}
 
-		List<String> adiSQLClauses = new ArrayList<String>();
+		// Create the OR clause that limits the accounts for a given data source.
+		List<String> adiSQLClauses = new ArrayList<>();
 		for (Map.Entry<Long, Set<Long>> entry : accountIdToDatasourceObjIdMap.entrySet()) {
 			final Long accountID = entry.getKey();
-			String datasourceObjIdsCSV = StringUtils.buildCSVString(entry.getValue());
-
+			String datasourceObjIdsCSV = CommManagerSqlStringUtils.buildCSVString(entry.getValue());
+			
 			adiSQLClauses.add(
-					"( ( relationships.data_source_obj_id IN ( " + datasourceObjIdsCSV + " ) )"
-					+ " AND ( relationships.account1_id = " + accountID
+					"( "
+					+ (!datasourceObjIdsCSV.isEmpty() ? "( relationships.data_source_obj_id IN ( " + datasourceObjIdsCSV + " ) ) AND" : "")
+					+ " ( relationships.account1_id = " + accountID
 					+ " OR relationships.account2_id = " + accountID + " ) )"
 			);
 		}
-		String adiSQLClause = StringUtils.joinAsStrings(adiSQLClauses, " OR ");
+		String adiSQLClause = CommManagerSqlStringUtils.joinAsStrings(adiSQLClauses, " OR ");
+		
+		if(adiSQLClause.isEmpty()) {
+			LOGGER.log(Level.SEVERE, "The set of AccountDeviceInstances had no valid data source ids.");
+			return Collections.emptySet();
+		}
 
-		// set up applicable filters
-		Set<String> applicableFilters = new HashSet<String>(Arrays.asList(
+		// Build the filter part of the query.
+		Set<String> applicableFilters = new HashSet<>(Arrays.asList(
 				CommunicationsFilter.RelationshipTypeFilter.class
 						.getName(),
 				CommunicationsFilter.DateRangeFilter.class
@@ -933,43 +984,39 @@ public Set<Content> getRelationshipSources(Set<AccountDeviceInstance> accountDev
 		));
 		String filterSQL = getCommunicationsFilterSQL(filter, applicableFilters);
 
+		// Basic join.
 		String limitQuery = " account_relationships AS relationships";
+		
+		// If the user set filters expand this to be a subquery that selects
+		// accounts based on the filter.
 		String limitStr = getMostRecentFilterLimitSQL(filter);
 		if (!limitStr.isEmpty()) {
 			limitQuery = "(SELECT * FROM account_relationships as relationships " + limitStr + ") as relationships";
 		}
-		
+
 		String queryStr
-			= "SELECT DISTINCT artifacts.artifact_id AS artifact_id,"
-			+ " artifacts.obj_id AS obj_id,"
-			+ " artifacts.artifact_obj_id AS artifact_obj_id,"
-			+ " artifacts.data_source_obj_id AS data_source_obj_id, "
-			+ " artifacts.artifact_type_id AS artifact_type_id, "
-			+ " artifacts.review_status_id AS review_status_id  "
-			+ " FROM blackboard_artifacts as artifacts"
-			+ " JOIN " + limitQuery
-			+ "	ON artifacts.artifact_obj_id = relationships.relationship_source_obj_id"
-			// append sql to restrict search to specified account device instances 
-			+ " WHERE (" + adiSQLClause + " )"
-			// plus other filters
-			+ (filterSQL.isEmpty() ? "" : " AND (" + filterSQL + " )");
+				= "SELECT DISTINCT artifacts.artifact_id AS artifact_id,"
+				+ " artifacts.obj_id AS obj_id,"
+				+ " artifacts.artifact_obj_id AS artifact_obj_id,"
+				+ " artifacts.data_source_obj_id AS data_source_obj_id, "
+				+ " artifacts.artifact_type_id AS artifact_type_id, "
+				+ " artifacts.review_status_id AS review_status_id,"
+				+ " tsk_data_artifacts.os_account_obj_id as os_account_obj_id"
+				+ " FROM blackboard_artifacts as artifacts"
+				+ " JOIN " + limitQuery
+				+ "	ON artifacts.artifact_obj_id = relationships.relationship_source_obj_id"
+				+ " LEFT JOIN tsk_data_artifacts ON artifacts.artifact_obj_id = tsk_data_artifacts.artifact_obj_id"
+				// append sql to restrict search to specified account device instances 
+				+ " WHERE (" + adiSQLClause + " )"
+				// plus other filters
+				+ (filterSQL.isEmpty() ? "" : " AND (" + filterSQL + " )");
 
-		
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
 				Statement s = connection.createStatement();
 				ResultSet rs = connection.executeQuery(s, queryStr);) { //NON-NLS
-			Set<Content> relationshipSources = new HashSet<Content>();
-			while (rs.next()) {
-				BlackboardArtifact.Type bbartType = db.getArtifactType(rs.getInt("artifact_type_id"));
-				relationshipSources.add(new BlackboardArtifact(db, rs.getLong("artifact_id"),
-						rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						bbartType.getTypeID(),
-						bbartType.getTypeName(), bbartType.getDisplayName(),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
-			}
-
+			Set<Content> relationshipSources = new HashSet<>();
+			relationshipSources.addAll(getDataArtifactsFromResult(rs));
 			return relationshipSources;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting relationships for account. " + ex.getMessage(), ex);
@@ -1011,7 +1058,7 @@ public List<AccountDeviceInstance> getRelatedAccountDeviceInstances(AccountDevic
 				+ "		  data_source_obj_id"
 				+ " FROM account_relationships as relationships"
 				+ " WHERE %2$1s = " + accountDeviceInstance.getAccount().getAccountID() + ""
-				+ " AND data_source_obj_id IN (" + StringUtils.buildCSVString(dataSourceObjIds) + ")"
+				+ " AND data_source_obj_id IN (" + CommManagerSqlStringUtils.buildCSVString(dataSourceObjIds) + ")"
 				+ (innerQueryfilterSQL.isEmpty() ? "" : " AND " + innerQueryfilterSQL);
 
 		String innerQuery1 = String.format(innerQueryTemplate, "account1_id", "account2_id");
@@ -1101,7 +1148,7 @@ public List<AccountDeviceInstance> getRelatedAccountDeviceInstances(AccountDevic
 	public List<Content> getRelationshipSources(AccountDeviceInstance account1, AccountDeviceInstance account2, CommunicationsFilter filter) throws TskCoreException {
 
 		//set up applicable filters 
-		Set<String> applicableFilters = new HashSet<String>(Arrays.asList(
+		Set<String> applicableFilters = new HashSet<>(Arrays.asList(
 				CommunicationsFilter.DateRangeFilter.class.getName(),
 				CommunicationsFilter.DeviceFilter.class.getName(),
 				CommunicationsFilter.RelationshipTypeFilter.class.getName()
@@ -1119,10 +1166,12 @@ public List<Content> getRelationshipSources(AccountDeviceInstance account1, Acco
 				+ "		artifacts.artifact_obj_id AS artifact_obj_id,"
 				+ "		artifacts.data_source_obj_id AS data_source_obj_id,"
 				+ "		artifacts.artifact_type_id AS artifact_type_id,"
-				+ "		artifacts.review_status_id AS review_status_id"
+				+ "		artifacts.review_status_id AS review_status_id,"
+				+ "     tsk_data_artifacts.os_account_obj_id AS os_account_obj_id"
 				+ " FROM blackboard_artifacts AS artifacts"
 				+ "	JOIN " + limitQuery
 				+ "		ON artifacts.artifact_obj_id = relationships.relationship_source_obj_id"
+				+ " LEFT JOIN tsk_data_artifacts ON artifacts.artifact_obj_id = tsk_data_artifacts.artifact_obj_id"
 				+ " WHERE (( relationships.account1_id = " + account1.getAccount().getAccountID()
 				+ " AND relationships.account2_id  = " + account2.getAccount().getAccountID()
 				+ " ) OR (	  relationships.account2_id = " + account1.getAccount().getAccountID()
@@ -1134,15 +1183,8 @@ public List<Content> getRelationshipSources(AccountDeviceInstance account1, Acco
 				Statement s = connection.createStatement();
 				ResultSet rs = connection.executeQuery(s, queryString);) {
 
-			ArrayList<Content> artifacts = new ArrayList<Content>();
-			while (rs.next()) {
-				BlackboardArtifact.Type bbartType = db.getArtifactType(rs.getInt("artifact_type_id"));
-				artifacts.add(new BlackboardArtifact(db, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						bbartType.getTypeID(), bbartType.getTypeName(), bbartType.getDisplayName(),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
-			}
-
+			ArrayList<Content> artifacts = new ArrayList<>();
+			artifacts.addAll(getDataArtifactsFromResult(rs));
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting relationships between accounts. " + ex.getMessage(), ex);
@@ -1163,7 +1205,7 @@ public List<Content> getRelationshipSources(AccountDeviceInstance account1, Acco
 	 */
 	public List<AccountFileInstance> getAccountFileInstances(Account account) throws TskCoreException {
 		List<AccountFileInstance> accountFileInstanceList = new ArrayList<>();
-
+		@SuppressWarnings("deprecation")
 		List<BlackboardArtifact> artifactList = getSleuthkitCase().getBlackboardArtifacts(BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT, BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID, account.getTypeSpecificID());
 
 		if (artifactList != null && !artifactList.isEmpty()) {
@@ -1188,10 +1230,10 @@ public List<AccountFileInstance> getAccountFileInstances(Account account) throws
 	 * @throws TskCoreException
 	 */
 	public List<Account.Type> getAccountTypesInUse() throws TskCoreException {
-		
+
 		String query = "SELECT DISTINCT accounts.account_type_id, type_name, display_name FROM accounts JOIN account_types ON accounts.account_type_id = account_types.account_type_id";
 		List<Account.Type> inUseAccounts = new ArrayList<>();
-		
+
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
 				Statement s = connection.createStatement();
@@ -1385,4 +1427,31 @@ private String getMostRecentFilterLimitSQL(CommunicationsFilter filter) {
 
 		return limitStr;
 	}
+
+	/**
+	 * A helper method that will return a list of BlackboardArtifact objects for
+	 * the given ResultSet.
+	 *
+	 * @param resultSet	The results of executing a query.
+	 *
+	 * @return A list of BlackboardArtifact objects.
+	 *
+	 * @throws SQLException
+	 * @throws TskCoreException
+	 */
+	private List<BlackboardArtifact> getDataArtifactsFromResult(ResultSet resultSet) throws SQLException, TskCoreException {
+		List<BlackboardArtifact> artifacts = new ArrayList<>();
+		while (resultSet.next()) {
+			BlackboardArtifact.Type bbartType = db.getBlackboard().getArtifactType(resultSet.getInt("artifact_type_id"));
+			artifacts.add(new DataArtifact(db, resultSet.getLong("artifact_id"),
+					resultSet.getLong("obj_id"), resultSet.getLong("artifact_obj_id"),
+					resultSet.getObject("data_source_obj_id") != null ? resultSet.getLong("data_source_obj_id") : null,
+					bbartType.getTypeID(),
+					bbartType.getTypeName(), bbartType.getDisplayName(),
+					BlackboardArtifact.ReviewStatus.withID(resultSet.getInt("review_status_id")),
+					resultSet.getLong("os_account_obj_id"), false));
+		}
+
+		return artifacts;
+	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Content.java b/bindings/java/src/org/sleuthkit/datamodel/Content.java
index 39df2ae626069ff0dec5310be61cb7e55873c011..5993ac460165046abba1e0ec0cf1603f181e397a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Content.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Content.java
@@ -211,7 +211,7 @@ public interface Content extends SleuthkitVisitableItem {
 	 * @param justification	 Justification
 	 * @param attributesList Additional attributes to attach to this analysis
 	 *                       result artifact.
-	 * @param dataDourcrId   The data source for the analysis result
+	 * @param dataSourceId   The data source for the analysis result
 	 *
 	 * @return AnalysisResultAdded The analysis return added and the current
 	 *         aggregate score of content.
diff --git a/bindings/java/src/org/sleuthkit/datamodel/ContentProviderStream.java b/bindings/java/src/org/sleuthkit/datamodel/ContentProviderStream.java
new file mode 100644
index 0000000000000000000000000000000000000000..5617362036bb1a9f2c55991bd7d39072c5e44552
--- /dev/null
+++ b/bindings/java/src/org/sleuthkit/datamodel/ContentProviderStream.java
@@ -0,0 +1,41 @@
+/*
+ * SleuthKit Java Bindings
+ *
+ * Copyright 2023 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.datamodel;
+
+/**
+ * Custom provider for content bytes.
+ */
+@SuppressWarnings("try")
+public interface ContentProviderStream extends AutoCloseable {
+
+	/**
+	 * Reads data that this content object is associated with (file contents,
+	 * volume contents, etc.).
+	 *
+	 * @param buf    a character array of data (in bytes) to copy read data to
+	 * @param offset byte offset in the content to start reading from
+	 * @param len    number of bytes to read into buf.
+	 *
+	 * @return num of bytes read, or -1 on error
+	 *
+	 * @throws TskCoreException if critical error occurred during read in the
+	 *                          tsk core
+	 */
+	public int read(byte[] buf, long offset, long len) throws TskCoreException;
+}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/ContentStreamProvider.java b/bindings/java/src/org/sleuthkit/datamodel/ContentStreamProvider.java
new file mode 100644
index 0000000000000000000000000000000000000000..c7b09bd2bd46e803ee7d64511d871569bd65efe8
--- /dev/null
+++ b/bindings/java/src/org/sleuthkit/datamodel/ContentStreamProvider.java
@@ -0,0 +1,38 @@
+/*
+ * SleuthKit Java Bindings
+ *
+ * Copyright 2023 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.datamodel;
+
+import java.util.Optional;
+
+/**
+ * Custom provider for bytes of an abstract file.
+ */
+public interface ContentStreamProvider {
+
+	/**
+	 * Provides a content stream for a content object or empty if this provider
+	 * has none to provide.
+	 *
+	 * @param content The content.
+	 *
+	 * @return The content stream or empty if no stream can be provided for this
+	 *         content.
+	 */
+	Optional<ContentProviderStream> getContentStream(Content content) throws TskCoreException;
+}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/DerivedFile.java b/bindings/java/src/org/sleuthkit/datamodel/DerivedFile.java
index 35a52ab4fd65c202ebd511b3edb56a780d1278d9..22cbba2660f29ffbef4ac96d0afcfba3f42467d7 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/DerivedFile.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/DerivedFile.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -51,6 +51,7 @@ public class DerivedFile extends AbstractFile {
 	 *                           added.
 	 * @param objId              The object id of the file in the case database.
 	 * @param dataSourceObjectId The object id of the data source for the file.
+	 * @param fileSystemObjectId The object id of the file system. May be null.
 	 * @param name               The name of the file.
 	 * @param dirType            The type of the file, usually as reported in
 	 *                           the name structure of the file system. May be
@@ -72,6 +73,8 @@ public class DerivedFile extends AbstractFile {
 	 * @param mtime              The modified time of the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         sha256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -90,12 +93,14 @@ public class DerivedFile extends AbstractFile {
 	DerivedFile(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
+			Long fileSystemObjectId, 
 			String name,
 			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
 			long size,
 			long ctime, long crtime, long atime, long mtime,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
 			String parentPath,
 			String localPath,
 			long parentId,
@@ -106,9 +111,9 @@ public class DerivedFile extends AbstractFile {
 			Long osAccountObjId) {
 		// TODO (AUT-1904): The parent id should be passed to AbstractContent 
 		// through the class hierarchy contructors.
-		super(db, objId, dataSourceObjectId, TskData.TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0,
+		super(db, objId, dataSourceObjectId, fileSystemObjectId, TskData.TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0,
 				name, TSK_DB_FILES_TYPE_ENUM.DERIVED, 0L, 0, dirType, metaType, dirFlag,
-				metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, Collections.emptyList());
+				metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId,  TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 		setLocalFilePath(localPath);
 		setEncodingType(encodingType);
 	}
@@ -262,57 +267,4 @@ public String toString() {
 			return "DerivedMethod{" + "derived_id=" + derivedId + ", toolName=" + toolName + ", toolVersion=" + toolVersion + ", other=" + other + ", rederiveDetails=" + rederiveDetails + '}'; //NON-NLS
 		}
 	}
-
-	/**
-	 * Constructs a representation of a file or directory that has been derived
-	 * from another file and is stored outside of the data source (e.g., on a
-	 * user's machine). A typical example of a derived file is a file extracted
-	 * from an archive file.
-	 *
-	 * @param db         The case database to which the file has been added.
-	 * @param objId      The object id of the file in the case database.
-	 * @param name       The name of the file.
-	 * @param dirType    The type of the file, usually as reported in the name
-	 *                   structure of the file system. May be set to
-	 *                   TSK_FS_NAME_TYPE_ENUM.UNDEF.
-	 * @param metaType   The type of the file, usually as reported in the
-	 *                   metadata structure of the file system. May be set to
-	 *                   TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.
-	 * @param dirFlag    The allocated status of the file, usually as reported
-	 *                   in the name structure of the file system.
-	 * @param metaFlags  The allocated status of the file, usually as reported
-	 *                   in the metadata structure of the file system.
-	 * @param size       The size of the file.
-	 * @param ctime      The changed time of the file.
-	 * @param crtime     The created time of the file.
-	 * @param atime      The accessed time of the file.
-	 * @param mtime      The modified time of the file.
-	 * @param md5Hash    The MD5 hash of the file, null if not yet calculated.
-	 * @param knownState The known state of the file from a hash database
-	 *                   lookup, null if not yet looked up.
-	 * @param parentPath The path of the parent of the file.
-	 * @param localPath  The absolute path of the file in secondary storage.
-	 * @param parentId   The object id of parent of the file.
-	 *
-	 * @deprecated Do not make subclasses outside of this package.
-	 */
-	@Deprecated
-	@SuppressWarnings("deprecation")
-	protected DerivedFile(SleuthkitCase db,
-			long objId,
-			String name,
-			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
-			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
-			long size,
-			long ctime, long crtime, long atime, long mtime,
-			String md5Hash, FileKnown knownState,
-			String parentPath,
-			String localPath,
-			long parentId) {
-		this(db, objId, db.getDataSourceObjectId(objId), name, dirType, metaType, dirFlag, metaFlags, size,
-				ctime, crtime, atime, mtime,
-				md5Hash, null, knownState,
-				parentPath, localPath, parentId, null, TskData.EncodingType.NONE, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
-	}
-
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Directory.java b/bindings/java/src/org/sleuthkit/datamodel/Directory.java
index f0d376b381093971ca36fde7624365117502bda0..877a626bedfc0912d9162d0afc215f78adc17e89 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Directory.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Directory.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -70,6 +70,8 @@ public class Directory extends FsContent {
 	 * @param gid                The GID for the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -89,9 +91,10 @@ public class Directory extends FsContent {
 			long size,
 			long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState, String parentPath, 
-			String ownerUid, Long osAccountObjId ) {
-		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, knownState, parentPath, null, null, ownerUid, osAccountObjId, Collections.emptyList());
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState, String parentPath, 
+			String ownerUid, Long osAccountObjId) {
+		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, null, null, ownerUid, osAccountObjId, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 	}
 
 	/**
@@ -252,6 +255,6 @@ protected Directory(SleuthkitCase db,
 			long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid,
 			String md5Hash, FileKnown knownState, String parentPath) {
-		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
+		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, null, knownState, parentPath, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
 	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/File.java b/bindings/java/src/org/sleuthkit/datamodel/File.java
index 659ed70acdee84c323b760f6a1a7f7915be6196b..5ee548df86a5a7532b83fb844328a8250e8b1af6 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/File.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/File.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -71,6 +71,8 @@ public class File extends FsContent {
 	 * @param gid                The GID for the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -80,7 +82,7 @@ public class File extends FsContent {
 	 *                           including the '.'), can be null.
 	 * @param ownerUid			 UID of the file owner as found in the file
 	 *                           system, can be null.
-	 * @param osAccountObjId	 Obj id of the owner OS account, may be null.
+	 * @param osAccountObjId     Obj id of the owner OS account, may be null.
 	 */
 	File(SleuthkitCase db,
 			long objId,
@@ -94,12 +96,17 @@ public class File extends FsContent {
 			long size,
 			long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState, String parentPath, String mimeType,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState, String parentPath, String mimeType,
 			String extension,
 			String ownerUid,
 			Long osAccountObjId,
+			TskData.CollectedStatus collected,
 			List<Attribute> fileAttributes) {
-		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, fileAttributes);
+		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.FS, 
+				metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, 
+				modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, 
+				ownerUid, osAccountObjId, collected, fileAttributes);
 	}
 
 	/**
@@ -253,6 +260,86 @@ protected File(SleuthkitCase db,
 			String name, long metaAddr, int metaSeq, TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size, long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid, String md5Hash, FileKnown knownState, String parentPath, String mimeType) {
-		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
+		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, null, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
+	}
+	
+	/**
+	 * Constructs a representation of a file system file that has been added to
+	 * the case.
+	 *
+	 * @param db                 The case database to which the file has been
+	 *                           added.
+	 * @param objId              The object id of the file in the case database.
+	 * @param dataSourceObjectId The object id of the data source for the file.
+	 * @param fsObjId            The object id of the file system to which this
+	 *                           file belongs.
+	 * @param attrType           The type attribute given to the file by the
+	 *                           file system.
+	 * @param attrId             The type id given to the file by the file
+	 *                           system.
+	 * @param name               The name of the file.
+	 * @param metaAddr           The meta address of the file.
+	 * @param metaSeq            The meta sequence number of the file.
+	 * @param dirType            The type of the file, usually as reported in
+	 *                           the name structure of the file system. May be
+	 *                           set to TSK_FS_NAME_TYPE_ENUM.UNDEF.
+	 * @param metaType           The type of the file, usually as reported in
+	 *                           the metadata structure of the file system. May
+	 *                           be set to
+	 *                           TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.
+	 * @param dirFlag            The allocated status of the file, usually as
+	 *                           reported in the name structure of the file
+	 *                           system.
+	 * @param metaFlags          The allocated status of the file, usually as
+	 *                           reported in the metadata structure of the file
+	 *                           system.
+	 * @param size               The size of the file.
+	 * @param ctime              The changed time of the file.
+	 * @param crtime             The created time of the file.
+	 * @param atime              The accessed time of the file.
+	 * @param mtime              The modified time of the file.
+	 * @param modes              The modes for the file.
+	 * @param uid                The UID for the file.
+	 * @param gid                The GID for the file.
+	 * @param md5Hash            The MD5 hash of the file, null if not yet
+	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
+	 * @param knownState         The known state of the file from a hash
+	 *                           database lookup, null if not yet looked up.
+	 * @param parentPath         The path of the parent of the file.
+	 * @param mimeType           The MIME type of the file, null if it has not
+	 *                           yet been determined.
+	 * @param extension	         The extension part of the file name (not
+	 *                           including the '.'), can be null.
+	 * @param ownerUid			 UID of the file owner as found in the file
+	 *                           system, can be null.
+	 * @param osAccountObjId     Obj id of the owner OS account, may be null.
+	 * @deprecated Do not make subclasses outside of this package.
+	 */
+	@Deprecated
+	@SuppressWarnings("deprecation")
+	File(SleuthkitCase db,
+			long objId,
+			long dataSourceObjectId,
+			long fsObjId,
+			TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
+			String name,
+			long metaAddr, int metaSeq,
+			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
+			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
+			long size,
+			long ctime, long crtime, long atime, long mtime,
+			short modes, int uid, int gid,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState, String parentPath, String mimeType,
+			String extension,
+			String ownerUid,
+			Long osAccountObjId,
+			List<Attribute> fileAttributes) {
+		this(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name,
+				metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, 
+				modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, 
+				ownerUid, osAccountObjId, null, fileAttributes);
 	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/FileManager.java b/bindings/java/src/org/sleuthkit/datamodel/FileManager.java
index 98f04c85586d6955d94628338d2e2e289d7f161b..624e55a88ff08665877971ad322780b76a4ebbae 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/FileManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/FileManager.java
@@ -83,16 +83,16 @@ public List<AbstractFile> findFilesExactName(long parentId, String name) throws
     }
 	
 	/**
-     * Find all files with the exact given name and exact parent path.
-     * 
+	 * Find all files with the exact given name and exact parent path.
+	 * 
 	 * @param dataSource The data source to search within.
-     * @param name Exact file name to match.
+	 * @param name Exact file name to match.
 	 * @param path Exact parent path.
-     * 
-     * @return A list of matching files.
-     * 
-     * @throws TskCoreException 
-     */
+	 * 
+	 * @return A list of matching files.
+	 * 
+	 * @throws TskCoreException 
+	 */
 	public List<AbstractFile> findFilesExactNameExactPath(Content dataSource, String name, String path) throws TskCoreException {
 		
 		// Database paths will always start and end with a forward slash, so add those if not present
diff --git a/bindings/java/src/org/sleuthkit/datamodel/FileSystem.java b/bindings/java/src/org/sleuthkit/datamodel/FileSystem.java
index d20f8b7df1b3dc5c8a62bef7677201a5badfca50..45a21b58e30e55f5c554c47d6489f17eee56401a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/FileSystem.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/FileSystem.java
@@ -195,6 +195,7 @@ public long getLastInum() {
 		return lastInum;
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	public void finalize() throws Throwable {
 		try {
diff --git a/bindings/java/src/org/sleuthkit/datamodel/FsContent.java b/bindings/java/src/org/sleuthkit/datamodel/FsContent.java
index 1e077d83c48f85232730f3e8dca5167d7fa447b2..48607515449a400102fd358c74f1e34dd51854de 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/FsContent.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/FsContent.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,16 +39,7 @@
 public abstract class FsContent extends AbstractFile {
 
 	private static final Logger logger = Logger.getLogger(FsContent.class.getName());
-	private volatile String uniquePath;
 	private List<String> metaDataText = null;
-	private volatile FileSystem parentFileSystem;
-
-	/**
-	 * @deprecated Use getFileSystemId instead.
-	 */
-	// TODO: Make private.
-	@Deprecated
-	protected final long fsObjId;
 
 	/**
 	 *
@@ -99,6 +90,8 @@ public abstract class FsContent extends AbstractFile {
 	 * @param gid                The GID for the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -109,8 +102,8 @@ public abstract class FsContent extends AbstractFile {
 	 * @param ownerUid			 UID of the file owner as found in the file
 	 *                           system, can be null.
 	 * @param osAccountObjId	 Obj id of the owner OS account, may be null.
+	 * @param collected          Collected status of the file data
 	 */
-	@SuppressWarnings("deprecation")
 	FsContent(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
@@ -124,15 +117,16 @@ public abstract class FsContent extends AbstractFile {
 			long size,
 			long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash,
+			FileKnown knownState,
 			String parentPath,
 			String mimeType,
 			String extension,
 			String ownerUid,
 			Long osAccountObjId,
+			TskData.CollectedStatus collected,
 			List<Attribute> fileAttributes) {
-		super(db, objId, dataSourceObjectId, attrType, attrId, name, fileType, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, fileAttributes);
-		this.fsObjId = fsObjId;
+		super(db, objId, dataSourceObjectId, Long.valueOf(fsObjId), attrType, attrId, name, fileType, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, collected, fileAttributes);
 	}
 
 	/**
@@ -140,37 +134,8 @@ public abstract class FsContent extends AbstractFile {
 	 *
 	 * @return the parent file system id
 	 */
-	@SuppressWarnings("deprecation")
 	public long getFileSystemId() {
-		return fsObjId;
-	}
-
-	/**
-	 * Sets the parent file system of this file or directory.
-	 *
-	 * @param parent The parent file system object.
-	 */
-	void setFileSystem(FileSystem parent) {
-		parentFileSystem = parent;
-	}
-
-	/**
-	 * Gets the parent file system of this file or directory.
-	 *
-	 * @return the file system object of the parent
-	 *
-	 * @throws org.sleuthkit.datamodel.TskCoreException
-	 */
-	@SuppressWarnings("deprecation")
-	public FileSystem getFileSystem() throws TskCoreException {
-		if (parentFileSystem == null) {
-			synchronized (this) {
-				if (parentFileSystem == null) {
-					parentFileSystem = getSleuthkitCase().getFileSystemById(fsObjId, AbstractContent.UNKNOWN_ID);
-				}
-			}
-		}
-		return parentFileSystem;
+		return getFileSystemObjectId().orElse(0L);
 	}
 
 	/**
@@ -257,28 +222,6 @@ public Content getDataSource() throws TskCoreException {
 		return getFileSystem().getDataSource();
 	}
 
-	/**
-	 * Get the full path to this file or directory, starting with a "/" and the
-	 * image name and then all the other segments in the path.
-	 *
-	 * @return A unique path for this object.
-	 *
-	 * @throws TskCoreException if there is an error querying the case database.
-	 */
-	@Override
-	public String getUniquePath() throws TskCoreException {
-		// It is possible that multiple threads could be doing this calculation
-		// simultaneously, but it's worth the potential extra processing to prevent deadlocks.
-		if (uniquePath == null) {
-			StringBuilder sb = new StringBuilder();
-			sb.append(getFileSystem().getUniquePath());
-			sb.append(getParentPath());
-			sb.append(getName());
-			uniquePath = sb.toString();
-		}
-		return uniquePath;
-	}
-
 	/**
 	 * Gets a text-based description of the file's metadata. This is the same
 	 * content as the TSK istat tool produces and is different information for
@@ -337,13 +280,19 @@ public void finalize() throws Throwable {
 	 *                      representation of this object.
 	 */
 	@Override
-	@SuppressWarnings("deprecation")
 	public String toString(boolean preserveState) {
+		String path = "";
+		try {
+			path = getUniquePath();
+		} catch (TskCoreException ex) {
+			logger.log(Level.SEVERE, "Error loading unique path for object ID: {0}", this.getId());
+		}
+		
 		return super.toString(preserveState)
 				+ "FsContent [\t" //NON-NLS
-				+ "fsObjId " + fsObjId //NON-NLS
-				+ "\t" + "uniquePath " + uniquePath //NON-NLS
-				+ "\t" + "fileHandle " + fileHandle //NON-NLS
+				+ "fsObjId " + getFileSystemId() //NON-NLS
+				+ "\t" + "uniquePath " + path //NON-NLS
+				+ "\t" + "fileHandle " + getFileHandle() //NON-NLS
 				+ "]\t";
 	}
 
@@ -392,7 +341,7 @@ public String toString(boolean preserveState) {
 			String name, long metaAddr, int metaSeq, TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size, long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid, String md5Hash, FileKnown knownState, String parentPath) {
-		this(db, objId, db.getDataSourceObjectId(objId), fsObjId, attrType, (int) attrId, name, TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, null, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList() );
+		this(db, objId, db.getDataSourceObjectId(objId), fsObjId, attrType, (int) attrId, name, TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, null, knownState, parentPath, null, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
 	}
 
 	/**
@@ -451,6 +400,88 @@ public String toString(boolean preserveState) {
 			String name, long metaAddr, int metaSeq, TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size, long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid, String md5Hash, FileKnown knownState, String parentPath, String mimeType) {
-		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
+		this(db, objId, dataSourceObjectId, fsObjId, attrType, (int) attrId, name, TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, null, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
+	}
+	
+	/**
+	 * Constructs an abstract base class for representations of a file system
+	 * files or directories that have been added to a case.
+	 *
+	 * @param db                 The case database to which the file has been
+	 *                           added.
+	 * @param objId              The object id of the file in the case database.
+	 * @param dataSourceObjectId The object id of the data source for the file.
+	 * @param fsObjId            The object id of the file system to which this
+	 *                           file belongs.
+	 * @param attrType           The type attribute given to the file by the
+	 *                           file system.
+	 * @param attrId             The type id given to the file by the file
+	 *                           system.
+	 * @param name               The name of the file.
+	 * @param fileType           The type of file
+	 * @param metaAddr           The meta address of the file.
+	 * @param metaSeq            The meta sequence number of the file.
+	 * @param dirType            The type of the file, usually as reported in
+	 *                           the name structure of the file system. May be
+	 *                           set to TSK_FS_NAME_TYPE_ENUM.UNDEF.
+	 * @param metaType           The type of the file, usually as reported in
+	 *                           the metadata structure of the file system. May
+	 *                           be set to
+	 *                           TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.
+	 * @param dirFlag            The allocated status of the file, usually as
+	 *                           reported in the name structure of the file
+	 *                           system.
+	 * @param metaFlags          The allocated status of the file, usually as
+	 *                           reported in the metadata structure of the file
+	 *                           system.
+	 * @param size               The size of the file.
+	 * @param ctime              The changed time of the file.
+	 * @param crtime             The created time of the file.
+	 * @param atime              The accessed time of the file.
+	 * @param mtime              The modified time of the file.
+	 * @param modes              The modes for the file.
+	 * @param uid                The UID for the file.
+	 * @param gid                The GID for the file.
+	 * @param md5Hash            The MD5 hash of the file, null if not yet
+	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
+	 * @param knownState         The known state of the file from a hash
+	 *                           database lookup, null if not yet looked up.
+	 * @param parentPath         The path of the parent of the file.
+	 * @param mimeType           The MIME type of the file, null if it has not
+	 *                           yet been determined.
+	 * @param extension          The extension part of the file name (not
+	 *                           including the '.'), can be null.
+	 * @param ownerUid			 UID of the file owner as found in the file
+	 *                           system, can be null.
+	 * @param osAccountObjId	 Obj id of the owner OS account, may be null.
+	 *
+	 * @deprecated Do not make subclasses outside of this package.
+	 */
+	@Deprecated
+	@SuppressWarnings("deprecation")
+	FsContent(SleuthkitCase db,
+			long objId,
+			long dataSourceObjectId,
+			long fsObjId,
+			TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
+			String name,
+			TSK_DB_FILES_TYPE_ENUM fileType,
+			long metaAddr, int metaSeq,
+			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
+			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
+			long size,
+			long ctime, long crtime, long atime, long mtime,
+			short modes, int uid, int gid,
+			String md5Hash, String sha256Hash, String sha1Hash,
+			FileKnown knownState,
+			String parentPath,
+			String mimeType,
+			String extension,
+			String ownerUid,
+			Long osAccountObjId,
+			List<Attribute> fileAttributes) {
+		this(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TSK_DB_FILES_TYPE_ENUM.FS, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, null, null, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/HostAddressManager.java b/bindings/java/src/org/sleuthkit/datamodel/HostAddressManager.java
index 06381ff85c8c512e201639a4dafc001880aefa22..da259858280e0fd8b3285d88673e2ba60bcf8af7 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/HostAddressManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/HostAddressManager.java
@@ -42,6 +42,7 @@ public class HostAddressManager {
 	private static final Logger LOGGER = Logger.getLogger(HostAddressManager.class.getName());
 
 	private final SleuthkitCase db;
+	private final static byte DEFAULT_MAPPING_CACHE_VALUE = 1;
 
 	/**
 	 * An HostAddress Object Id entry is maintained in this cache when a
@@ -117,19 +118,20 @@ private Optional<HostAddress> getHostAddress(HostAddress.HostAddressType type, S
 		if (type.equals(HostAddress.HostAddressType.DNS_AUTO)) {
 			addressType = getDNSType(address);
 		}
+		String normalizedAddress = getNormalizedAddress(address);
 		String queryString = "SELECT * FROM tsk_host_addresses"
 				+ " WHERE address = ?  AND address_type = ?";			
 		try {
 			PreparedStatement query = connection.getPreparedStatement(queryString, Statement.NO_GENERATED_KEYS);
 			query.clearParameters();
-			query.setString(1, address.toLowerCase());
+			query.setString(1, normalizedAddress.toLowerCase());
 			query.setInt(2, addressType.getId());
 			try (ResultSet rs = query.executeQuery()) {
 				if (!rs.next()) {
 					return Optional.empty();	// no match found
 				} else {
-					HostAddress newHostAddress = new HostAddress(db, rs.getLong("id"), HostAddressType.fromID(rs.getInt("address_type")), address);
-					recentHostAddressCache.put(createRecentHostAddressKey(newHostAddress.getAddressType(), address), newHostAddress);
+					HostAddress newHostAddress = new HostAddress(db, rs.getLong("id"), HostAddressType.fromID(rs.getInt("address_type")), rs.getString("address"));
+					recentHostAddressCache.put(createRecentHostAddressKey(newHostAddress.getAddressType(), normalizedAddress), newHostAddress);
 					return Optional.of(newHostAddress);					
 				}
 			}
@@ -209,6 +211,7 @@ private HostAddress newHostAddress(HostAddress.HostAddressType type, String addr
 			addressType = getDNSType(address);
 		}
 		
+		String normalizedAddress = getNormalizedAddress(address);
 		try {
 
 			// TODO: need to get the correct parent obj id.  
@@ -223,11 +226,11 @@ private HostAddress newHostAddress(HostAddress.HostAddressType type, String addr
 			preparedStatement.clearParameters();
 			preparedStatement.setLong(1, objId);
 			preparedStatement.setInt(2, addressType.getId());
-			preparedStatement.setString(3, address.toLowerCase());
+			preparedStatement.setString(3, normalizedAddress.toLowerCase());
 
 			connection.executeUpdate(preparedStatement);
-			HostAddress hostAddress =  new HostAddress(db, objId, addressType, address);
-			recentHostAddressCache.put(createRecentHostAddressKey(addressType, address), hostAddress);
+			HostAddress hostAddress =  new HostAddress(db, objId, addressType, normalizedAddress);
+			recentHostAddressCache.put(createRecentHostAddressKey(addressType, normalizedAddress), hostAddress);
 			return hostAddress;
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Error adding host address of type = %s, with address = %s", type.getName(), address), ex);
@@ -439,8 +442,8 @@ private void addHostNameAndIpMapping(HostAddress dnsNameAddress, HostAddress ipA
 			preparedStatement.setNull(4, java.sql.Types.BIGINT);
 		}
 		connection.executeUpdate(preparedStatement);
-		recentHostNameAndIpMappingCache.put(ipAddress.getId(), new Byte((byte) 1));
-		recentHostNameAndIpMappingCache.put(dnsNameAddress.getId(), new Byte((byte) 1));
+		recentHostNameAndIpMappingCache.put(ipAddress.getId(), DEFAULT_MAPPING_CACHE_VALUE);
+		recentHostNameAndIpMappingCache.put(dnsNameAddress.getId(), DEFAULT_MAPPING_CACHE_VALUE);
 	}
 
 	/**
@@ -477,7 +480,7 @@ public boolean hostNameAndIpMappingExists(long addressObjectId) throws TskCoreEx
 				} else {
 					boolean status = rs.getLong("mappingCount") > 0;
 					if (status) {
-						recentHostNameAndIpMappingCache.put(addressObjectId, new Byte((byte) 1));
+						recentHostNameAndIpMappingCache.put(addressObjectId, DEFAULT_MAPPING_CACHE_VALUE);
 					}
 					return status;
 				}
@@ -513,6 +516,7 @@ public Optional<Long> hostAddressExists(HostAddress.HostAddressType type, String
 		if (type.equals(HostAddress.HostAddressType.DNS_AUTO)) {
 			addressType = getDNSType(address);
 		} 
+		String normalizedAddress = getNormalizedAddress(address);
 		
 		String queryString = "SELECT id, address_type, address FROM tsk_host_addresses"
 				+ " WHERE address = ?  AND address_type = ?"; 
@@ -521,7 +525,7 @@ public Optional<Long> hostAddressExists(HostAddress.HostAddressType type, String
 		try (CaseDbConnection connection = this.db.getConnection();
 				PreparedStatement query = connection.getPreparedStatement(queryString, Statement.NO_GENERATED_KEYS);) {
 			query.clearParameters();
-			query.setString(1, address.toLowerCase());
+			query.setString(1, normalizedAddress.toLowerCase());
 			query.setInt(2, addressType.getId());
 			try (ResultSet rs = query.executeQuery()) {
 				if (!rs.next()) {
@@ -531,7 +535,7 @@ public Optional<Long> hostAddressExists(HostAddress.HostAddressType type, String
 					int addrType = rs.getInt("address_type");
 					String addr = rs.getString("address");
 					HostAddress hostAddr = new HostAddress(db, objId, HostAddress.HostAddressType.fromID(addrType), addr);
-					recentHostAddressCache.put(createRecentHostAddressKey(addrType, address), hostAddr);					
+					recentHostAddressCache.put(createRecentHostAddressKey(addrType, normalizedAddress), hostAddr);					
 					return Optional.of(objId);
 				}
 			}
@@ -568,7 +572,7 @@ public List<HostAddress> getIpAddress(String hostname) throws TskCoreException {
 				while (rs.next()) {
 					long ipAddressObjId = rs.getLong("ip_address_id");
 					IpAddresses.add(HostAddressManager.this.getHostAddress(ipAddressObjId, connection));
-					recentHostNameAndIpMappingCache.put(ipAddressObjId, new Byte((byte) 1));
+					recentHostNameAndIpMappingCache.put(ipAddressObjId, DEFAULT_MAPPING_CACHE_VALUE);
 				}
 				return IpAddresses;
 			}
@@ -606,7 +610,7 @@ List<HostAddress> getHostNameByIp(String ipAddress) throws TskCoreException {
 				while (rs.next()) {
 					long dnsAddressId = rs.getLong("dns_address_id");
 					dnsNames.add(HostAddressManager.this.getHostAddress(dnsAddressId, connection));
-					recentHostNameAndIpMappingCache.put(dnsAddressId, new Byte((byte) 1));
+					recentHostNameAndIpMappingCache.put(dnsAddressId, DEFAULT_MAPPING_CACHE_VALUE);
 				}
 				return dnsNames;
 			}
@@ -746,12 +750,17 @@ private static boolean isIPv4(String ipAddress) {
 	}
 
 	
+	// IPV6 address examples:
+	//		Standard: 684D:1111:222:3333:4444:5555:6:77
+	//		Compressed: 1234:fd2:5621:1:89::4500
+	//		With zone/interface specifier: fe80::1ff:fe23:4567:890a%eth2 
+	//									   fe80::1ff:fe23:4567:890a%3
 	private static final Pattern IPV6_STD_PATTERN = 
-            Pattern.compile("^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$");
+            Pattern.compile("^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}(%.+)?$");
     private static final Pattern IPV6_HEX_COMPRESSED_PATTERN = 
-            Pattern.compile("^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)$");
+            Pattern.compile("^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)(%.+)?$");
 
-   
+	
     private static boolean isIPv6StdAddress(final String input) {
         return IPV6_STD_PATTERN.matcher(input).matches();
     }
@@ -774,4 +783,47 @@ private static boolean isIPv6(String ipAddress) {
 
 		return false;
 	}
+	
+	/**
+	 * Normalizes an address.
+	 * 
+	 * It intentionally does NOT convert to lowercase so that the case may be
+	 * preserved, and only converted where needed.
+	 *
+	 * @param address Address to normalize.
+	 *
+	 * @return Normalized address.
+	 */
+	private static String getNormalizedAddress(String address) {
+		
+		String normalizedAddress = address;
+		
+		if (isIPv6(address)) {
+			normalizedAddress = getNormalizedIPV6Address(address);
+		}
+		
+		return normalizedAddress;
+	}
+	
+	/**
+	 * Normalize an IPv6 address:
+	 *  - removing the zone/interface specifier if one exists.
+	 *
+	 * It intentionally does NOT convert to lowercase so that the case may be
+	 * preserved, and only converted where needed.
+	 *
+	 * @param address Address to normalize. 
+	 *
+	 * @return Normalized IPv6 address.
+	 */
+	private static String getNormalizedIPV6Address(String address) {
+		
+		String normalizedAddress = address;
+		if ( normalizedAddress.contains("%") ) {
+			normalizedAddress = normalizedAddress.substring(0, normalizedAddress.indexOf("%"));
+		}
+		
+		return normalizedAddress;
+	}
 }
+
diff --git a/bindings/java/src/org/sleuthkit/datamodel/HostManager.java b/bindings/java/src/org/sleuthkit/datamodel/HostManager.java
index d8255b2e7823a93e715bb3264b3f2794ac95f6ab..fb8d2eda00053413b525f4a5df7dd11c087f6b37 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/HostManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/HostManager.java
@@ -295,7 +295,7 @@ public List<DataSource> getDataSourcesForHost(Host host) throws TskCoreException
 
 			return dataSources;
 		} catch (SQLException | TskDataException ex) {
-			throw new TskCoreException(String.format("Error getting data sources for host " + host.getName()), ex);
+			throw new TskCoreException(String.format("Error getting data sources for host %s", host.getName()), ex);
 		} finally {
 			db.releaseSingleUserCaseReadLock();
 		}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Image.java b/bindings/java/src/org/sleuthkit/datamodel/Image.java
index 7e2d793a29e1749b0d4b4aa192334d73fa4def6b..f51eac30a399d09f063ceae1688eff564f1cb82d 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Image.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Image.java
@@ -145,6 +145,7 @@ public void close() {
 		//frees nothing, as we are caching image handles
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	public void finalize() throws Throwable {
 		try {
diff --git a/bindings/java/src/org/sleuthkit/datamodel/IngestModuleInfo.java b/bindings/java/src/org/sleuthkit/datamodel/IngestModuleInfo.java
index bec082446464559c1b1c4b89aababacb46e3f919..5f30c39efad41e4d2b4db18a4a0010604b68e9a8 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/IngestModuleInfo.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/IngestModuleInfo.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2011-2016 Basis Technology Corp.
+ * Copyright 2014-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,7 +21,7 @@
 import java.util.ResourceBundle;
 
 /**
- * Class representing information about an ingest module, used in ingest job
+ * Represents information about an ingest module factory, used in ingest job
  * info to show which ingest modules were run.
  */
 public final class IngestModuleInfo {
@@ -32,12 +32,18 @@ public final class IngestModuleInfo {
 	 * Used to keep track of the module types
 	 */
 	public static enum IngestModuleType {
-		//DO NOT CHANGE ORDER
+		/*
+		 * IMPORTANT: DO NOT CHANGE ORDER, THE ORDINAL VALUES OF THE ENUM ARE
+		 * STORED IN THE CASE DATABASE
+		 */
 		DATA_SOURCE_LEVEL(bundle.getString("IngestModuleInfo.IngestModuleType.DataSourceLevel.displayName")),
-		FILE_LEVEL(bundle.getString("IngestModuleInfo.IngestModuleType.FileLevel.displayName"));
-		
-		private String displayName;
-		
+		FILE_LEVEL(bundle.getString("IngestModuleInfo.IngestModuleType.FileLevel.displayName")),
+		DATA_ARTIFACT(bundle.getString("IngestModuleInfo.IngestModuleType.DataArtifact.displayName")),
+		MULTIPLE(bundle.getString("IngestModuleInfo.IngestModuleType.Multiple.displayName")),
+		ANALYSIS_RESULT(bundle.getString("IngestModuleInfo.IngestModuleType.AnalysisResult.displayName"));
+
+		private final String displayName;
+
 		private IngestModuleType(String displayName) {
 			this.displayName = displayName;
 		}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/LayoutFile.java b/bindings/java/src/org/sleuthkit/datamodel/LayoutFile.java
index dea60c1329b396f7e1e817c3e84f387e6c1802ed..938f57124be052ef9873731edc5365dfc1c53c8e 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/LayoutFile.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/LayoutFile.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,6 +55,7 @@ public class LayoutFile extends AbstractFile {
 	 *                           added.
 	 * @param objId              The object id of the file in the case database.
 	 * @param dataSourceObjectId The object id of the data source for the file.
+	 * @param fileSystemObjectId The object id of the file system. May be null.
 	 * @param name               The name of the file.
 	 * @param fileType           The type of the file.
 	 * @param dirType            The type of the file, usually as reported in
@@ -77,6 +78,8 @@ public class LayoutFile extends AbstractFile {
 	 * @param mtime              The modified time of the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -89,18 +92,20 @@ public class LayoutFile extends AbstractFile {
 	LayoutFile(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
+			Long fileSystemObjectId,
 			String name,
 			TSK_DB_FILES_TYPE_ENUM fileType,
 			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
 			long size,
 			long ctime, long crtime, long atime, long mtime,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash,
+			FileKnown knownState,
 			String parentPath, String mimeType,
 			String ownerUid,
 			Long osAccountObjId) {
 			
-		super(db, objId, dataSourceObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name, fileType, 0L, 0, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, knownState, parentPath, mimeType, SleuthkitCase.extractExtension(name), ownerUid, osAccountObjId, Collections.emptyList());
+		super(db, objId, dataSourceObjectId, fileSystemObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name, fileType, 0L, 0, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, SleuthkitCase.extractExtension(name), ownerUid, osAccountObjId, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 	}
 
 	/**
@@ -251,42 +256,4 @@ public <T> T accept(SleuthkitItemVisitor<T> visitor) {
 	public String toString(boolean preserveState) {
 		return super.toString(preserveState) + "LayoutFile [\t" + "]\t"; //NON-NLS
 	}
-
-	/**
-	 * Constructs a representation of a layout file that has been added to a
-	 * case. Layout files are not file system files, but "virtual" files created
-	 * from blocks of data (e.g. unallocated) that are treated as files for
-	 * convenience and uniformity.
-	 *
-	 * @param db         The case database to which the file has been added.
-	 * @param objId      The object id of the file in the case database.
-	 * @param name       The name of the file.
-	 * @param fileType   The type of the file.
-	 * @param dirType    The type of the file, usually as reported in the name
-	 *                   structure of the file system. May be set to
-	 *                   TSK_FS_NAME_TYPE_ENUM.UNDEF.
-	 * @param metaType   The type of the file, usually as reported in the
-	 *                   metadata structure of the file system. May be set to
-	 *                   TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.
-	 * @param dirFlag    The allocated status of the file, usually as reported
-	 *                   in the name structure of the file system.
-	 * @param metaFlags  The allocated status of the file, usually as reported
-	 *                   in the metadata structure of the file system.
-	 * @param size       The size of the file.
-	 * @param md5Hash    The MD5 hash of the file, null if not yet calculated.
-	 * @param knownState The known state of the file from a hash database
-	 *                   lookup, null if not yet looked up.
-	 * @param parentPath The path of the parent of the file.
-	 *
-	 * @deprecated Do not make subclasses outside of this package.
-	 */
-	@Deprecated
-	@SuppressWarnings("deprecation")
-	protected LayoutFile(SleuthkitCase db, long objId, String name,
-			TSK_DB_FILES_TYPE_ENUM fileType,
-			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
-			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
-			long size, String md5Hash, FileKnown knownState, String parentPath) {
-		this(db, objId, db.getDataSourceObjectId(objId), name, fileType, dirType, metaType, dirFlag, metaFlags, size, 0L, 0L, 0L, 0L, md5Hash, null, knownState, parentPath, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
-	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/LocalDirectory.java b/bindings/java/src/org/sleuthkit/datamodel/LocalDirectory.java
index 8eafbefdbf6f4ea45cd4677d9107701fbf6ef4a3..b3eed95ed10f0c91f772f87ce4016afe5cf61491 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/LocalDirectory.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/LocalDirectory.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  * 
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  * 
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -53,6 +53,8 @@ public class LocalDirectory extends SpecialDirectory {
 	 * @param size               The size of the local directory, should be
 	 *                           zero.
 	 * @param md5Hash            The MD5 hash for the local directory.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state for the local directory
 	 * @param parentPath         The parent path for the local directory
 	 */
@@ -62,11 +64,12 @@ public class LocalDirectory extends SpecialDirectory {
 			String name,
 			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
 			String parentPath) {
-		super(db, objId, dataSourceObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name,
+		super(db, objId, dataSourceObjectId, null, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name,
 				TskData.TSK_DB_FILES_TYPE_ENUM.LOCAL_DIR, 0L, 0, dirType, metaType, dirFlag,
-				metaFlags, 0L, 0L, 0L, 0L, 0L, (short) 0, 0, 0, md5Hash, sha256Hash, knownState, parentPath, null);
+				metaFlags, 0L, 0L, 0L, 0L, 0L, (short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, null);
 	}
 	
 	/**
@@ -74,6 +77,7 @@ public class LocalDirectory extends SpecialDirectory {
 	 * Will always be false.
 	 * @return false
 	 */
+	@Override
 	public boolean isDataSource() {
 		return false;
 	}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/LocalFile.java b/bindings/java/src/org/sleuthkit/datamodel/LocalFile.java
index 1b749b080ce91cc5624194e6920abdc4f5f2362e..8a7948ce64ed12f7a9a9741a7ee5d2f5cfbd682b 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/LocalFile.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/LocalFile.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -64,6 +64,8 @@ public class LocalFile extends AbstractFile {
 	 *                           yet been determined.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         SHA-256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentId           The object id of parent of the file.
@@ -86,7 +88,8 @@ public class LocalFile extends AbstractFile {
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
 			long size,
 			long ctime, long crtime, long atime, long mtime,
-			String mimeType, String md5Hash, String sha256Hash, FileKnown knownState,
+			String mimeType, String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
 			long parentId, String parentPath,
 			long dataSourceObjectId,
 			String localPath,
@@ -94,9 +97,9 @@ public class LocalFile extends AbstractFile {
 			String extension,
 			String ownerUid,
 			Long osAccountObjId) {
-		super(db, objId, dataSourceObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0,
+		super(db, objId, dataSourceObjectId, null, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0,
 				name, fileType, 0L, 0, dirType, metaType, dirFlag,
-				metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, Collections.emptyList());
+				metaFlags, size, ctime, crtime, atime, mtime, (short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 		// TODO (AUT-1904): The parent id should be passed to AbstractContent 
 		// through the class hierarchy contructors, using 
 		// AbstractContent.UNKNOWN_ID as needed.
@@ -223,7 +226,7 @@ protected LocalFile(SleuthkitCase db,
 				dirFlag, metaFlags,
 				size,
 				ctime, crtime, atime, mtime,
-				null, md5Hash, null, knownState,
+				null, md5Hash, null, null, knownState,
 				AbstractContent.UNKNOWN_ID, parentPath,
 				db.getDataSourceObjectId(objId),
 				localPath,
diff --git a/bindings/java/src/org/sleuthkit/datamodel/LocalFilesDataSource.java b/bindings/java/src/org/sleuthkit/datamodel/LocalFilesDataSource.java
index ae5652c88eb550555c887a69db33822a162f9b3a..3107c4b7108a8220bcaabf59451fd3f6cf8a3839 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/LocalFilesDataSource.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/LocalFilesDataSource.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2011-2021 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -63,13 +63,14 @@ public class LocalFilesDataSource extends VirtualDirectory implements DataSource
 	 * @param timezone           The timezone for the data source.
 	 * @param md5Hash            The MD5 hash for the virtual directory.
 	 * @param sha256Hash         The SHA-256 hash for the virtual directory.
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state for the virtual directory
 	 * @param parentPath         The parent path for the virtual directory,
 	 *                           should be "/" if the virtual directory is a
 	 *                           data source.
 	 */
-	public LocalFilesDataSource(SleuthkitCase db, long objId, long dataSourceObjectId, String deviceId, String name, TskData.TSK_FS_NAME_TYPE_ENUM dirType, TskData.TSK_FS_META_TYPE_ENUM metaType, TskData.TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, String timezone, String md5Hash, String sha256Hash, TskData.FileKnown knownState, String parentPath) {
-		super(db, objId, dataSourceObjectId, name, dirType, metaType, dirFlag, metaFlags, md5Hash, sha256Hash, knownState, parentPath);
+	public LocalFilesDataSource(SleuthkitCase db, long objId, long dataSourceObjectId, String deviceId, String name, TskData.TSK_FS_NAME_TYPE_ENUM dirType, TskData.TSK_FS_META_TYPE_ENUM metaType, TskData.TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, String timezone, String md5Hash, String sha256Hash, String sha1Hash, TskData.FileKnown knownState, String parentPath) {
+		super(db, objId, dataSourceObjectId, null, name, dirType, metaType, dirFlag, metaFlags, md5Hash, sha256Hash, sha1Hash, knownState, parentPath);
 		this.objectId = objId;
 		this.deviceId = deviceId;
 		this.timezone = timezone;
@@ -181,6 +182,11 @@ static long getContentSize(SleuthkitCase sleuthkitCase, long dataSourceObjId) th
 
 		return contentSize;
 	}
+	
+	@Override
+	public String getUniquePath() throws TskCoreException {
+		return "/" + getName();
+	}
 
 	/**
 	 * Sets the acquisition details field in the case database.
@@ -370,6 +376,6 @@ public <T> T accept(SleuthkitItemVisitor<T> visitor) {
 	 */
 	@Deprecated
 	public LocalFilesDataSource(SleuthkitCase db, long objId, long dataSourceObjectId, String deviceId, String name, TskData.TSK_FS_NAME_TYPE_ENUM dirType, TskData.TSK_FS_META_TYPE_ENUM metaType, TskData.TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, String timezone, String md5Hash, TskData.FileKnown knownState, String parentPath) {
-		this(db, objId, dataSourceObjectId, deviceId, name, dirType, metaType, dirFlag, metaFlags, timezone, md5Hash, null, knownState, parentPath);
+		this(db, objId, dataSourceObjectId, deviceId, name, dirType, metaType, dirFlag, metaFlags, timezone, md5Hash, null, null, knownState, parentPath);
 	}	
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/OsAccount.java b/bindings/java/src/org/sleuthkit/datamodel/OsAccount.java
index d78434f4c3e7efcc660b4b4df9ca32d33b6c770a..3549228c49e287e24f8cbd2c3517a204eda73947 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/OsAccount.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/OsAccount.java
@@ -67,7 +67,9 @@ public enum OsAccountStatus {
 		UNKNOWN(0, bundle.getString("OsAccountStatus.Unknown.text")),
 		ACTIVE(1, bundle.getString("OsAccountStatus.Active.text")),
 		DISABLED(2, bundle.getString("OsAccountStatus.Disabled.text")),
-		DELETED(3, bundle.getString("OsAccountStatus.Deleted.text"));
+		@Deprecated /* Use NON_EXISTENT instead. */
+		DELETED(3, bundle.getString("OsAccountStatus.Deleted.text")),
+		NON_EXISTENT(4, bundle.getString("OsAccountStatus.NonExistent.text"));
 
 		private final int id;
 		private final String name;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/OsAccountInstance.java b/bindings/java/src/org/sleuthkit/datamodel/OsAccountInstance.java
index 0a39baaf982e51fb8a541f54b77cd1ec6a52de3e..615e584f29cce44d7bdd94b41da6d3351ab92b03 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/OsAccountInstance.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/OsAccountInstance.java
@@ -18,6 +18,7 @@
  */
 package org.sleuthkit.datamodel;
 
+import java.util.Arrays;
 import java.util.Objects;
 import java.util.ResourceBundle;
 
@@ -165,16 +166,26 @@ public boolean equals(Object obj) {
 			return false;
 		}
 		final OsAccountInstance other = (OsAccountInstance) obj;
+		
+		if(this.instanceId != other.instanceId) {
+			return false;
+		}
+		
 		if (this.accountId != other.accountId) {
 			return false;
 		}
-
-		return this.dataSourceId != other.dataSourceId;
+		
+		if(this.instanceType != other.instanceType) {
+			return false;
+		}
+		
+		return this.dataSourceId == other.getDataSourceId();
 	}
 
 	@Override
 	public int hashCode() {
 		int hash = 7;
+		hash = 67 * hash + Objects.hashCode(this.instanceId);
 		hash = 67 * hash + Objects.hashCode(this.dataSourceId);
 		hash = 67 * hash + Objects.hashCode(this.accountId);
 		hash = 67 * hash + Objects.hashCode(this.instanceType);
@@ -182,16 +193,16 @@ public int hashCode() {
 	}
 
 	/**
-	 * Describes the relationship between an os account instance and the host
-	 * where the instance was found.
-	 *
-	 * Whether an os account actually performed any action on the host or if
-	 * just a reference to it was found on the host (such as in a log file)
+	 * Describes what is known about what an OS Account did on a specific host.
+     *
+	 * Note: lower ordinal value is more significant than higher ordinal value. 
+	 * Order of significance: LAUNCHED > ACCESSED > REFERENCED.
 	 */
 	public enum OsAccountInstanceType {
-		LAUNCHED(0, bundle.getString("OsAccountInstanceType.Launched.text"), bundle.getString("OsAccountInstanceType.Launched.descr.text")), // the user launched a program on the host
-		ACCESSED(1, bundle.getString("OsAccountInstanceType.Accessed.text"), bundle.getString("OsAccountInstanceType.Accessed.descr.text")), // user accesed a resource for read/write
-		REFERENCED(2, bundle.getString("OsAccountInstanceType.Referenced.text"), bundle.getString("OsAccountInstanceType.Referenced.descr.text"));	// user was referenced, e.g. in a event log.
+		LAUNCHED(0, bundle.getString("OsAccountInstanceType.Launched.text"), bundle.getString("OsAccountInstanceType.Launched.descr.text")), // user had an interactive session or launched a program on the host
+		ACCESSED(1, bundle.getString("OsAccountInstanceType.Accessed.text"), bundle.getString("OsAccountInstanceType.Accessed.descr.text")), // user accessed a resource/file for read/write. Could have been via a service (such as a file share) or a SID on a random file from an unknown location.  NOTE: Because Windows event logs do not show if an authentication was for an interactive login or accessing a service, we mark a user as ACCESSED based on authentication. They become LAUNCHED if we have proof of them starting a program or getting an interactive login. 
+		REFERENCED(2, bundle.getString("OsAccountInstanceType.Referenced.text"), bundle.getString("OsAccountInstanceType.Referenced.descr.text"));	// user was referenced in a log file (e.g. in an event log) or registry, but there was no evidence of activity or ownership on the host. Examples include an account that was never used and entries on a log server. 
+        
 
 		private final int id;
 		private final String name;
@@ -245,5 +256,18 @@ public static OsAccountInstanceType fromID(int typeId) {
 			}
 			return null;
 		}
+		
+		/**
+		 * Gets account instance type enum from name.
+		 *
+		 * @param name Name to look for.
+		 *
+		 * @return Account instance type enum, null if no match is found.
+		 */
+		public static OsAccountInstanceType fromString(String name) {
+			return Arrays.stream(values())
+					.filter(val -> val.getName().equals(name))
+					.findFirst().orElse(null);
+		}
 	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/OsAccountManager.java b/bindings/java/src/org/sleuthkit/datamodel/OsAccountManager.java
index baf2a3e4fe4d8cf26f7dfa52795f2e9986e162eb..19d10064b854566cccba009646acff3a92368ddf 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/OsAccountManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/OsAccountManager.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2020-2021 Basis Technology Corp.
+ * Copyright 2020-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,19 +28,23 @@
 import java.util.Collections;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.NavigableSet;
+import java.util.NavigableMap;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.UUID;
-import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.stream.Collectors;
 import org.sleuthkit.datamodel.BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE;
 import org.sleuthkit.datamodel.OsAccount.OsAccountStatus;
 import org.sleuthkit.datamodel.OsAccount.OsAccountType;
 import org.sleuthkit.datamodel.OsAccount.OsAccountAttribute;
+import org.sleuthkit.datamodel.OsAccountRealmManager.OsRealmUpdateResult;
+import org.sleuthkit.datamodel.OsAccountRealmManager.OsRealmUpdateStatus;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
 import org.sleuthkit.datamodel.TskEvent.OsAccountsUpdatedTskEvent;
+import static org.sleuthkit.datamodel.WindowsAccountUtils.isWindowsWellKnownSid;
+import static org.sleuthkit.datamodel.WindowsAccountUtils.getWindowsWellKnownSidFullName;
 
 /**
  * Responsible for creating/updating/retrieving the OS accounts for files and
@@ -50,7 +54,7 @@ public final class OsAccountManager {
 
 	private final SleuthkitCase db;
 	private final Object osAcctInstancesCacheLock;
-	private final NavigableSet<OsAccountInstance> osAccountInstanceCache;
+	private final NavigableMap<OsAccountInstanceKey, OsAccountInstance> osAccountInstanceCache;
 
 	/**
 	 * Construct a OsUserManager for the given SleuthkitCase.
@@ -61,7 +65,7 @@ public final class OsAccountManager {
 	OsAccountManager(SleuthkitCase skCase) {
 		db = skCase;
 		osAcctInstancesCacheLock = new Object();
-		osAccountInstanceCache = new ConcurrentSkipListSet<>();
+		osAccountInstanceCache = new ConcurrentSkipListMap<>();
 	}
 
 	/**
@@ -151,31 +155,80 @@ public OsAccount newWindowsOsAccount(String sid, String loginName, String realmN
 			throw new TskCoreException("A referring host is required to create an account.");
 		}
 
-		// ensure at least one of the two is supplied - unique id or a login name
-		if (StringUtils.isBlank(sid) && StringUtils.isBlank(loginName)) {
+		// ensure at least one of the two is supplied - a non-null unique id or a login name
+		if ((StringUtils.isBlank(sid) || sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) 
+				&& StringUtils.isBlank(loginName)) {
 			throw new TskCoreException("Cannot create OS account with both uniqueId and loginName as null.");
 		}
 		// Realm name is required if the sid is null. 
-		if (StringUtils.isBlank(sid) && StringUtils.isBlank(realmName)) {
+		if ((StringUtils.isBlank(sid) || sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) 
+				&& StringUtils.isBlank(realmName)) {
 			throw new TskCoreException("Realm name or SID is required to create a Windows account.");
 		}
 
-		if (!StringUtils.isBlank(sid) && !WindowsAccountUtils.isWindowsUserSid(sid)) {
+		if (!StringUtils.isBlank(sid) && !sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) && !WindowsAccountUtils.isWindowsUserSid(sid)) {
 			throw new OsAccountManager.NotUserSIDException(String.format("SID = %s is not a user SID.", sid));
 		}
 
+		// If no SID is given and the given realm/login names is a well known account, get and use the well known SID
+		if (StringUtils.isBlank(sid) 
+			&& !StringUtils.isBlank(loginName) && !StringUtils.isBlank(realmName) 
+				&& WindowsAccountUtils.isWindowsWellKnownAccountName(loginName, realmName)) {
+			sid = WindowsAccountUtils.getWindowsWellKnownAccountSid(loginName, realmName);
+		}
+		
+		
+		OsRealmUpdateResult realmUpdateResult;
+		Optional<OsAccountRealm> anotherRealmWithSameName = Optional.empty();
+		Optional<OsAccountRealm> anotherRealmWithSameAddr = Optional.empty();
+		
 		// get the realm for the account, and update it if it is missing addr or name.
-		Optional<OsAccountRealm> realmOptional;
+		OsAccountRealm realm = null;
 		try (CaseDbConnection connection = db.getConnection()) {
-			realmOptional = db.getOsAccountRealmManager().getAndUpdateWindowsRealm(sid, realmName, referringHost, connection);
+			realmUpdateResult = db.getOsAccountRealmManager().getAndUpdateWindowsRealm(sid, realmName, referringHost, connection);
+			
+			Optional<OsAccountRealm> realmOptional = realmUpdateResult.getUpdatedRealm();
+			if (realmOptional.isPresent()) {
+				realm = realmOptional.get();
+				
+				if (realmUpdateResult.getUpdateStatus() == OsRealmUpdateStatus.UPDATED) {
+
+					// Check if update of the realm triggers a merge with any other realm, 
+					// say another realm with same name but no SID, or same SID but no name
+					
+					//1. Check if there is any OTHER realm with the same name, same host but no addr
+					anotherRealmWithSameName = db.getOsAccountRealmManager().getAnotherRealmByName(realmOptional.get(), realmName, referringHost, connection);
+					
+					// 2. Check if there is any OTHER realm with same addr and host, but NO name
+					anotherRealmWithSameAddr = db.getOsAccountRealmManager().getAnotherRealmByAddr(realmOptional.get(), realmName, referringHost, connection);
+				}
+			}
 		}
-		OsAccountRealm realm;
-		if (realmOptional.isPresent()) {
-			realm = realmOptional.get();
-		} else {
+		
+		if (null == realm) {
 			// realm was not found, create it.
 			realm = db.getOsAccountRealmManager().newWindowsRealm(sid, realmName, referringHost, realmScope);
+		} else if (realmUpdateResult.getUpdateStatus() == OsRealmUpdateStatus.UPDATED) {
+			// if the realm already existed and was updated, and there are other realms with same  name or addr that should now be merged into the updated realm
+			if (anotherRealmWithSameName.isPresent() || anotherRealmWithSameAddr.isPresent()) {
+
+				CaseDbTransaction trans = this.db.beginTransaction();
+				try {
+					if (anotherRealmWithSameName.isPresent()) {
+						db.getOsAccountRealmManager().mergeRealms(anotherRealmWithSameName.get(), realm, trans);
+					}
+					if (anotherRealmWithSameAddr.isPresent()) {
+						db.getOsAccountRealmManager().mergeRealms(anotherRealmWithSameAddr.get(), realm, trans);
+					}
+
+					trans.commit();
+				} catch (TskCoreException ex) {
+					trans.rollback();
+					throw ex;	// rethrow
+				}
+			}
 		}
+		
 
 		return newWindowsOsAccount(sid, loginName, realm);
 	}
@@ -199,20 +252,44 @@ public OsAccount newWindowsOsAccount(String sid, String loginName, String realmN
 	 */
 	public OsAccount newWindowsOsAccount(String sid, String loginName, OsAccountRealm realm) throws TskCoreException, NotUserSIDException {
 
-		// ensure at least one of the two is supplied - unique id or a login name
-		if (StringUtils.isBlank(sid) && StringUtils.isBlank(loginName)) {
+		// ensure at least one of the two is supplied - a non-null unique id or a login name
+		if ((StringUtils.isBlank(sid) || sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) 
+				&& StringUtils.isBlank(loginName)) {
 			throw new TskCoreException("Cannot create OS account with both uniqueId and loginName as null.");
 		}
 
-		if (!StringUtils.isBlank(sid) && !WindowsAccountUtils.isWindowsUserSid(sid)) {
+		if (!StringUtils.isBlank(sid) && !sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) && !WindowsAccountUtils.isWindowsUserSid(sid)) {
 			throw new OsAccountManager.NotUserSIDException(String.format("SID = %s is not a user SID.", sid));
 		}
 
+		// If the login name is well known, we use the well known english name. 
+		String resolvedLoginName = WindowsAccountUtils.toWellknownEnglishLoginName(loginName);
+		
 		CaseDbTransaction trans = db.beginTransaction();
 		try {
 			// try to create account
 			try {
-				OsAccount account = newOsAccount(sid, loginName, realm, OsAccount.OsAccountStatus.UNKNOWN, trans);
+				String uniqueId = (!StringUtils.isBlank(sid) && !sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) ?  sid : null;
+				if (!StringUtils.isBlank(sid) && !sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) && isWindowsWellKnownSid(sid)) {
+					// if the SID is a Windows well known SID, then prefer to use the default well known login name
+					String wellKnownLoginName = WindowsAccountUtils.getWindowsWellKnownSidLoginName(sid);
+					if (!StringUtils.isEmpty(wellKnownLoginName)) {
+						resolvedLoginName = wellKnownLoginName;
+					}
+				}
+					
+				OsAccount account = newOsAccount(uniqueId, resolvedLoginName, realm, OsAccount.OsAccountStatus.UNKNOWN, trans);
+
+				// If the SID indicates a special windows account, then set its full name. 
+				if (!StringUtils.isBlank(sid) && !sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) && isWindowsWellKnownSid(sid)) {
+					String fullName = getWindowsWellKnownSidFullName(sid);
+					if (StringUtils.isNotBlank(fullName)) {
+						OsAccountUpdateResult updateResult = updateStandardOsAccountAttributes(account, fullName, null, null, null, trans);
+						if (updateResult.getUpdatedAccount().isPresent()) {
+							account = updateResult.getUpdatedAccount().get();
+						}
+					}
+				}
 				trans.commit();
 				trans = null;
 				return account;
@@ -233,8 +310,8 @@ public OsAccount newWindowsOsAccount(String sid, String loginName, OsAccountReal
 				}
 
 				// search by loginName
-				if (!Strings.isNullOrEmpty(loginName)) {
-					osAccount = getOsAccountByLoginName(loginName, realm);
+				if (!Strings.isNullOrEmpty(resolvedLoginName)) {
+					osAccount = getOsAccountByLoginName(resolvedLoginName, realm);
 					if (osAccount.isPresent()) {
 						return osAccount.get();
 					}
@@ -243,7 +320,7 @@ public OsAccount newWindowsOsAccount(String sid, String loginName, OsAccountReal
 				// create failed for some other reason, throw an exception
 				throw new TskCoreException(String.format("Error creating OsAccount with sid = %s, loginName = %s, realm = %s, referring host = %s",
 						(sid != null) ? sid : "Null",
-						(loginName != null) ? loginName : "Null",
+						(resolvedLoginName != null) ? resolvedLoginName : "Null",
 						(!realm.getRealmNames().isEmpty()) ? realm.getRealmNames().get(0) : "Null",
 						realm.getScopeHost().isPresent() ? realm.getScopeHost().get().getName() : "Null"), ex);
 
@@ -510,10 +587,12 @@ OsAccount getOsAccountByObjectId(long osAccountObjId, CaseDbConnection connectio
 	 * @param dataSource   Data source where the instance is found.
 	 * @param instanceType Instance type.
 	 *
+	 * @return OsAccountInstance Existing or newly created account instance.
+	 *
 	 * @throws TskCoreException If there is an error creating the account
 	 *                          instance.
 	 */
-	public void newOsAccountInstance(OsAccount osAccount, DataSource dataSource, OsAccountInstance.OsAccountInstanceType instanceType) throws TskCoreException {
+	public OsAccountInstance newOsAccountInstance(OsAccount osAccount, DataSource dataSource, OsAccountInstance.OsAccountInstanceType instanceType) throws TskCoreException {
 		if (osAccount == null) {
 			throw new TskCoreException("Cannot create account instance with null account.");
 		}
@@ -521,21 +600,14 @@ public void newOsAccountInstance(OsAccount osAccount, DataSource dataSource, OsA
 			throw new TskCoreException("Cannot create account instance with null data source.");
 		}
 
-		/*
-		 * Check the cache of OS account instances for an existing instance for
-		 * this OS account and data source. Note that the account instance
-		 * created here has a bogus instance ID. This is possible since the
-		 * instance ID is not considered in the equals() and hashCode() methods
-		 * of this class.
-		 */
-		synchronized (osAcctInstancesCacheLock) {
-			if (osAccountInstanceCache.contains(new OsAccountInstance(db, 0, osAccount.getId(), dataSource.getId(), instanceType))) {
-				return;
-			}
+		// check the cache first 
+		Optional<OsAccountInstance> existingInstance = cachedAccountInstance(osAccount.getId(), dataSource.getId(), instanceType);
+		if (existingInstance.isPresent()) {
+			return existingInstance.get();
 		}
 
 		try (CaseDbConnection connection = this.db.getConnection()) {
-			newOsAccountInstance(osAccount.getId(), dataSource.getId(), instanceType, connection);
+			return newOsAccountInstance(osAccount.getId(), dataSource.getId(), instanceType, connection);
 		}
 	}
 
@@ -549,21 +621,16 @@ public void newOsAccountInstance(OsAccount osAccount, DataSource dataSource, OsA
 	 * @param instanceType    Instance type.
 	 * @param connection      The current database connection.
 	 *
+	 * @return OsAccountInstance Existing or newly created account instance.
+	 *
 	 * @throws TskCoreException If there is an error creating the account
 	 *                          instance.
 	 */
-	void newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInstance.OsAccountInstanceType instanceType, CaseDbConnection connection) throws TskCoreException {
-		/*
-		 * Check the cache of OS account instances for an existing instance for
-		 * this OS account and data source. Note that the account instance
-		 * created here has a bogus instance ID. This is possible since the
-		 * instance ID is not considered in the equals() and hashCode() methods
-		 * of this class.
-		 */
-		synchronized (osAcctInstancesCacheLock) {
-			if (osAccountInstanceCache.contains(new OsAccountInstance(db, 0, osAccountId, dataSourceObjId, instanceType))) {
-				return;
-			}
+	OsAccountInstance newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInstance.OsAccountInstanceType instanceType, CaseDbConnection connection) throws TskCoreException {
+
+		Optional<OsAccountInstance> existingInstance = cachedAccountInstance(osAccountId, dataSourceObjId, instanceType);
+		if (existingInstance.isPresent()) {
+			return existingInstance.get();
 		}
 
 		/*
@@ -583,7 +650,15 @@ void newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInsta
 				if (resultSet.next()) {
 					OsAccountInstance accountInstance = new OsAccountInstance(db, resultSet.getLong(1), osAccountId, dataSourceObjId, instanceType);
 					synchronized (osAcctInstancesCacheLock) {
-						osAccountInstanceCache.add(accountInstance);
+						OsAccountInstanceKey key = new OsAccountInstanceKey(osAccountId, dataSourceObjId);
+						// remove from cache any instances less significant (higher ordinal) than this instance
+						for (OsAccountInstance.OsAccountInstanceType type : OsAccountInstance.OsAccountInstanceType.values()) {
+							if (accountInstance.getInstanceType().compareTo(type) < 0) {
+								osAccountInstanceCache.remove(key);
+							}
+						}
+						// add the new most significant instance to the cache
+						osAccountInstanceCache.put(key, accountInstance);
 					}
 					/*
 					 * There is a potential issue here. The cache of OS account
@@ -601,6 +676,16 @@ void newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInsta
 					 * from time to time.
 					 */
 					db.fireTSKEvent(new TskEvent.OsAcctInstancesAddedTskEvent(Collections.singletonList(accountInstance)));
+
+					return accountInstance;
+				} else {
+					// there is the possibility that another thread may be adding the same os account instance at the same time
+					// the database may be updated prior to the cache being updated so this provides an extra opportunity to check
+					// the cache before throwing the exception
+					Optional<OsAccountInstance> existingInstanceRetry = cachedAccountInstance(osAccountId, dataSourceObjId, instanceType);
+					if (existingInstanceRetry.isPresent()) {
+						return existingInstanceRetry.get();
+					}
 				}
 			}
 		} catch (SQLException ex) {
@@ -608,6 +693,68 @@ void newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInsta
 		} finally {
 			db.releaseSingleUserCaseWriteLock();
 		}
+		
+		// It's possible that we weren't able to load the account instance because it
+		// is already in the database but the instance cache was cleared during an account merge.
+		// Try loading it here and re-adding to the cache.
+		String whereClause = "tsk_os_account_instances.os_account_obj_id = " + osAccountId
+				+ " AND tsk_os_account_instances.data_source_obj_id = " + dataSourceObjId;
+		List<OsAccountInstance> instances = getOsAccountInstances(whereClause);
+		if (instances.isEmpty()) {
+			throw new TskCoreException(String.format("Could not get autogen key after row insert or reload instance for OS account instance. OS account object id = %d, data source object id = %d", osAccountId, dataSourceObjId));
+		}
+		
+		OsAccountInstance accountInstance = instances.get(0);
+		synchronized (osAcctInstancesCacheLock) {
+			OsAccountInstanceKey key = new OsAccountInstanceKey(osAccountId, dataSourceObjId);
+			// remove from cache any instances less significant (higher ordinal) than this instance
+			for (OsAccountInstance.OsAccountInstanceType type : OsAccountInstance.OsAccountInstanceType.values()) {
+				if (accountInstance.getInstanceType().compareTo(type) < 0) {
+					osAccountInstanceCache.remove(key);
+				}
+			}
+			// add the most significant instance to the cache
+			osAccountInstanceCache.put(key, accountInstance);
+		}
+		return accountInstance;
+	}
+
+	/**
+	 * Check if an account instance exists in the cache for the given account
+	 * id, data source and instance type.
+	 *
+	 * Instance type does not need to be an exact match - an existing instance
+	 * with an instance type more significant than the specified type is
+	 * considered a match.
+	 *
+	 * @param osAccountId     Account id.
+	 * @param dataSourceObjId Data source object id.
+	 * @param instanceType    Account instance type.
+	 *
+	 * @return Optional with OsAccountInstance, Optional.empty if there is no
+	 *         matching instance in cache.
+	 *
+	 */
+	private Optional<OsAccountInstance> cachedAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInstance.OsAccountInstanceType instanceType) {
+
+		/*
+		 * Check the cache of OS account instances for an existing instance for
+		 * this OS account and data source. Note that the account instance
+		 * created here has a bogus instance ID. This is possible since the
+		 * instance ID is not considered in the equals() and hashCode() methods
+		 * of this class.
+		 */
+		synchronized (osAcctInstancesCacheLock) {
+			OsAccountInstanceKey key = new OsAccountInstanceKey(osAccountId, dataSourceObjId);
+			OsAccountInstance instance = osAccountInstanceCache.get(key);
+			if (instance != null) {
+				// if the new instance type same or less significant than the existing instance (i.e. same or higher ordinal value) it's a match. 
+				if (instanceType.compareTo(instance.getInstanceType()) >= 0) {
+					return Optional.of(instance);
+				}
+			}
+			return Optional.empty();
+		}
 	}
 
 	/**
@@ -620,14 +767,13 @@ void newOsAccountInstance(long osAccountId, long dataSourceObjId, OsAccountInsta
 	 * @throws org.sleuthkit.datamodel.TskCoreException
 	 */
 	public List<OsAccount> getOsAccounts(Host host) throws TskCoreException {
-
-		String queryString = "SELECT * FROM tsk_os_accounts as accounts "
-				+ " JOIN tsk_os_account_instances as instances "
-				+ "		ON instances.os_account_obj_id = accounts.os_account_obj_id "
-				+ " JOIN data_source_info as datasources "
-				+ "		ON datasources.obj_id = instances.data_source_obj_id "
-				+ " WHERE datasources.host_id = " + host.getHostId()
-				+ " AND accounts.db_status = " + OsAccount.OsAccountDbStatus.ACTIVE.getId();
+		String queryString = "SELECT * FROM tsk_os_accounts accounts "
+				+ "WHERE accounts.os_account_obj_id IN "
+				+ "(SELECT instances.os_account_obj_id "
+				+ "FROM tsk_os_account_instances instances "
+				+ "INNER JOIN data_source_info datasources ON datasources.obj_id = instances.data_source_obj_id "
+				+ "WHERE datasources.host_id = " + host.getHostId() + ") "
+				+ "AND accounts.db_status = " + OsAccount.OsAccountDbStatus.ACTIVE.getId();
 
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = this.db.getConnection();
@@ -646,6 +792,40 @@ public List<OsAccount> getOsAccounts(Host host) throws TskCoreException {
 		}
 	}
 
+	/**
+	 * Get all accounts that had an instance on the specified data source.
+	 *
+	 * @param dataSourceId Data source id for which to look up accounts.
+	 *
+	 * @return List of OsAccounts, may be empty.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException
+	 */
+	public List<OsAccount> getOsAccountsByDataSourceObjId(long dataSourceId) throws TskCoreException {
+		String queryString = "SELECT * FROM tsk_os_accounts acc "
+				+ "WHERE acc.os_account_obj_id IN "
+				+ "(SELECT instance.os_account_obj_id "
+				+ "FROM tsk_os_account_instances instance "
+				+ "WHERE instance.data_source_obj_id = " + dataSourceId + ") "
+				+ "AND acc.db_status = " + OsAccount.OsAccountDbStatus.ACTIVE.getId();
+
+		db.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = this.db.getConnection();
+				Statement s = connection.createStatement();
+				ResultSet rs = connection.executeQuery(s, queryString)) {
+
+			List<OsAccount> accounts = new ArrayList<>();
+			while (rs.next()) {
+				accounts.add(osAccountFromResultSet(rs));
+			}
+			return accounts;
+		} catch (SQLException ex) {
+			throw new TskCoreException(String.format("Error getting OS accounts for data source id = %d", dataSourceId), ex);
+		} finally {
+			db.releaseSingleUserCaseReadLock();
+		}
+	}
+
 	/**
 	 * Merge all OS accounts from sourceRealm into destRealm. After this call: -
 	 * sourceRealm's accounts will have been moved or merged - References to
@@ -684,35 +864,11 @@ void mergeOsAccountsForRealms(OsAccountRealm sourceRealm, OsAccountRealm destRea
 			}
 
 			// Look for matching destination account
-			OsAccount matchingDestAccount = null;
-
-			// First look for matching unique id
-			if (sourceAccount.getAddr().isPresent()) {
-				List<OsAccount> matchingDestAccounts = destinationAccounts.stream()
-						.filter(p -> p.getAddr().equals(sourceAccount.getAddr()))
-						.collect(Collectors.toList());
-				if (!matchingDestAccounts.isEmpty()) {
-					matchingDestAccount = matchingDestAccounts.get(0);
-				}
-			}
-
-			// If a match wasn't found yet, look for a matching login name.
-			// We will merge only if:
-			// - We didn't already find a unique ID match
-			// - The source account has no unique ID OR the destination account has no unique ID
-			if (matchingDestAccount == null && sourceAccount.getLoginName().isPresent()) {
-				List<OsAccount> matchingDestAccounts = destinationAccounts.stream()
-						.filter(p -> (p.getLoginName().equals(sourceAccount.getLoginName())
-						&& ((!sourceAccount.getAddr().isPresent()) || (!p.getAddr().isPresent()))))
-						.collect(Collectors.toList());
-				if (!matchingDestAccounts.isEmpty()) {
-					matchingDestAccount = matchingDestAccounts.get(0);
-				}
-			}
+			Optional<OsAccount> matchingDestAccount = getMatchingAccountForMerge(sourceAccount, destinationAccounts);
 
 			// If we found a match, merge the accounts. Otherwise simply update the realm id
-			if (matchingDestAccount != null) {
-				mergeOsAccounts(sourceAccount, matchingDestAccount, trans);
+			if (matchingDestAccount.isPresent()) {
+				mergeOsAccounts(sourceAccount, matchingDestAccount.get(), trans);
 			} else {
 				String query = "UPDATE tsk_os_accounts SET realm_id = " + destRealm.getRealmId() + " WHERE os_account_obj_id = " + sourceAccount.getId();
 				try (Statement s = trans.getConnection().createStatement()) {
@@ -724,6 +880,70 @@ void mergeOsAccountsForRealms(OsAccountRealm sourceRealm, OsAccountRealm destRea
 			}
 		}
 	}
+	
+	/**
+	 * Checks for matching account in a list of accounts for merging
+	 * @param sourceAccount The account to find matches for
+	 * @param destinationAccounts List of accounts to match against
+	 * @return Optional with OsAccount, Optional.empty if no matching OsAccount is found.
+	 */
+	private Optional<OsAccount> getMatchingAccountForMerge(OsAccount sourceAccount, List<OsAccount> destinationAccounts) {
+		// Look for matching destination account
+		OsAccount matchingDestAccount = null;
+
+		// First look for matching unique id
+		if (sourceAccount.getAddr().isPresent()) {
+			List<OsAccount> matchingDestAccounts = destinationAccounts.stream()
+					.filter(p -> p.getAddr().equals(sourceAccount.getAddr()))
+					.collect(Collectors.toList());
+			if (!matchingDestAccounts.isEmpty()) {
+				matchingDestAccount = matchingDestAccounts.get(0);
+			}
+		}
+
+		// If a match wasn't found yet, look for a matching login name.
+		// We will merge only if:
+		// - We didn't already find a unique ID match
+		// - The source account has no unique ID OR the destination account has no unique ID
+		// - destination account has a login name and matches the source account login name
+		if (matchingDestAccount == null && sourceAccount.getLoginName().isPresent()) {
+			List<OsAccount> matchingDestAccounts = destinationAccounts.stream()
+					.filter(p -> p.getLoginName().isPresent())
+					.filter(p -> (p.getLoginName().get().equalsIgnoreCase(sourceAccount.getLoginName().get())
+					&& ((!sourceAccount.getAddr().isPresent()) || (!p.getAddr().isPresent()))))
+					.collect(Collectors.toList());
+			if (!matchingDestAccounts.isEmpty()) {
+				matchingDestAccount = matchingDestAccounts.get(0);
+			}
+		}
+		
+		return Optional.ofNullable(matchingDestAccount);
+	}
+	
+	/**
+	 * Checks for matching accounts in the same realm 
+	 * and then merges the accounts if a match is found
+	 * @param account The account to find matches for
+	 * @param trans The current transaction.
+	 * @throws TskCoreException 
+	 */
+	private void mergeOsAccount(OsAccount account, CaseDbTransaction trans) throws TskCoreException {
+		// Get the realm for the account
+		Long realmId = account.getRealmId();
+		OsAccountRealm realm = db.getOsAccountRealmManager().getRealmByRealmId(realmId,  trans.getConnection());
+		
+		// Get all users in the realm (excluding the account)
+		List<OsAccount> osAccounts = getOsAccounts(realm, trans.getConnection());
+		osAccounts.removeIf(acc -> Objects.equals(acc.getId(), account.getId()));
+		
+		// Look for matching account
+		Optional<OsAccount> matchingAccount = getMatchingAccountForMerge(account, osAccounts);
+		
+		// If we find a match, merge the accounts.
+		if (matchingAccount.isPresent()) {
+			mergeOsAccounts(matchingAccount.get(), account, trans);
+		}
+	}
 
 	/**
 	 * Merges data between two accounts so that only one is active at the end
@@ -748,7 +968,7 @@ private void mergeOsAccounts(OsAccount sourceAccount, OsAccount destAccount, Cas
 			query = makeOsAccountUpdateQuery("tsk_os_account_attributes", sourceAccount, destAccount);
 			s.executeUpdate(query);
 
-			// tsk_os_account_instances has a unique constraint on os_account_obj_id, data_source_obj_id, host_id,
+			// tsk_os_account_instances has a unique constraint on os_account_obj_id, data_source_obj_id, and instance_type,
 			// so delete any rows that would be duplicates.
 			query = "DELETE FROM tsk_os_account_instances "
 					+ "WHERE id IN ( "
@@ -758,7 +978,9 @@ private void mergeOsAccounts(OsAccount sourceAccount, OsAccount destAccount, Cas
 					+ "  tsk_os_account_instances destAccountInstance "
 					+ "INNER JOIN tsk_os_account_instances sourceAccountInstance ON destAccountInstance.data_source_obj_id = sourceAccountInstance.data_source_obj_id "
 					+ "WHERE destAccountInstance.os_account_obj_id = " + destAccount.getId()
-					+ " AND sourceAccountInstance.os_account_obj_id = " + sourceAccount.getId() + " )";
+					+ " AND sourceAccountInstance.os_account_obj_id = " + sourceAccount.getId()
+					+ " AND sourceAccountInstance.instance_type = destAccountInstance.instance_type" + ")";
+
 			s.executeUpdate(query);
 
 			query = makeOsAccountUpdateQuery("tsk_os_account_instances", sourceAccount, destAccount);
@@ -773,6 +995,10 @@ private void mergeOsAccounts(OsAccount sourceAccount, OsAccount destAccount, Cas
 			query = makeOsAccountUpdateQuery("tsk_data_artifacts", sourceAccount, destAccount);
 			s.executeUpdate(query);
 
+			
+			// register the merged accounts with the transaction to fire off an event
+			trans.registerMergedOsAccount(sourceAccount.getId(), destAccount.getId());
+			
 			// Update the source account. Make a dummy signature to prevent problems with the unique constraint.
 			String mergedSignature = makeMergedOsAccountSignature();
 			query = "UPDATE tsk_os_accounts SET merged_into = " + destAccount.getId()
@@ -946,11 +1172,19 @@ public Optional<OsAccount> getWindowsOsAccount(String sid, String loginName, Str
 			throw new TskCoreException("A referring host is required to get an account.");
 		}
 
-		// ensure at least one of the two is supplied - sid or a login name
-		if (StringUtils.isBlank(sid) && StringUtils.isBlank(loginName)) {
+		// ensure at least one of the two is supplied - a non-null sid or a login name
+		if ((StringUtils.isBlank(sid) || (sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) ) && StringUtils.isBlank(loginName)) {
 			throw new TskCoreException("Cannot get an OS account with both SID and loginName as null.");
 		}
 
+		// If no SID is given and the given realm/login names is a well known account, get and use the well known SID
+		if (StringUtils.isBlank(sid) 
+			&& !StringUtils.isBlank(loginName) && !StringUtils.isBlank(realmName) 
+				&& WindowsAccountUtils.isWindowsWellKnownAccountName(loginName, realmName)) {
+			sid = WindowsAccountUtils.getWindowsWellKnownAccountSid(loginName, realmName);
+			
+		}
+			
 		// first get the realm for the given sid
 		Optional<OsAccountRealm> realm = db.getOsAccountRealmManager().getWindowsRealm(sid, realmName, referringHost);
 		if (!realm.isPresent()) {
@@ -958,16 +1192,24 @@ public Optional<OsAccount> getWindowsOsAccount(String sid, String loginName, Str
 		}
 
 		// search by SID
-		if (!Strings.isNullOrEmpty(sid)) {
+		if (!Strings.isNullOrEmpty(sid) && !(sid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID))) {
 			if (!WindowsAccountUtils.isWindowsUserSid(sid)) {
 				throw new OsAccountManager.NotUserSIDException(String.format("SID = %s is not a user SID.", sid));
 			}
 
-			return this.getOsAccountByAddr(sid, realm.get());
+			Optional<OsAccount> account = this.getOsAccountByAddr(sid, realm.get());
+			if (account.isPresent()) {
+				return account;
+			}
 		}
 
 		// search by login name
-		return this.getOsAccountByLoginName(loginName, realm.get());
+		if (!Strings.isNullOrEmpty(loginName)) {
+			String resolvedLoginName = WindowsAccountUtils.toWellknownEnglishLoginName(loginName);
+			return this.getOsAccountByLoginName(resolvedLoginName, realm.get());
+		} else {
+			return Optional.empty();
+		}
 	}
 
 	/**
@@ -1092,7 +1334,7 @@ List<OsAccountAttribute> getOsAccountAttributes(OsAccount account) throws TskCor
 				if (!rs.wasNull()) {
 					sourceContent = this.db.getContentById(sourceObjId);
 				}
-				BlackboardAttribute.Type attributeType = db.getAttributeType(rs.getInt("attribute_type_id"));
+				BlackboardAttribute.Type attributeType = db.getBlackboard().getAttributeType(rs.getInt("attribute_type_id"));
 				OsAccountAttribute attribute = account.new OsAccountAttribute(attributeType, rs.getInt("value_int32"), rs.getLong("value_int64"),
 						rs.getDouble("value_double"), rs.getString("value_text"), rs.getBytes("value_byte"),
 						db, account, host, sourceContent);
@@ -1116,7 +1358,7 @@ List<OsAccountAttribute> getOsAccountAttributes(OsAccount account) throws TskCor
 	 *
 	 * @throws TskCoreException
 	 */
-	List<OsAccountInstance> getOsAccountInstances(OsAccount account) throws TskCoreException {
+	public List<OsAccountInstance> getOsAccountInstances(OsAccount account) throws TskCoreException {
 		String whereClause = "tsk_os_account_instances.os_account_obj_id = " + account.getId();
 		return getOsAccountInstances(whereClause);
 	}
@@ -1133,13 +1375,33 @@ List<OsAccountInstance> getOsAccountInstances(OsAccount account) throws TskCoreE
 	 */
 	public List<OsAccountInstance> getOsAccountInstances(List<Long> instanceIDs) throws TskCoreException {
 		String instanceIds = instanceIDs.stream().map(id -> id.toString()).collect(Collectors.joining(","));
-		String whereClause = "tsk_os_account_instances.id IN (" + instanceIds + ")";
-		return getOsAccountInstances(whereClause);
+
+		List<OsAccountInstance> osAcctInstances = new ArrayList<>();
+
+		String querySQL = "SELECT * FROM tsk_os_account_instances "
+				+ "	WHERE tsk_os_account_instances.id IN (" + instanceIds + ")";
+
+		db.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = db.getConnection();
+				PreparedStatement preparedStatement = connection.getPreparedStatement(querySQL, Statement.NO_GENERATED_KEYS);
+				ResultSet results = connection.executeQuery(preparedStatement)) {
+
+			osAcctInstances = getOsAccountInstancesFromResultSet(results);
+
+		} catch (SQLException ex) {
+			throw new TskCoreException("Failed to get OsAccountInstances (SQL = " + querySQL + ")", ex);
+		} finally {
+			db.releaseSingleUserCaseReadLock();
+		}
+		return osAcctInstances;
 	}
 
 	/**
 	 * Gets the OS account instances that satisfy the given SQL WHERE clause.
 	 *
+	 * Note: this query returns only the most significant instance type (lowest
+	 * ordinal) for each instance that matches the specified WHERE clause.
+	 *
 	 * @param whereClause The SQL WHERE clause.
 	 *
 	 * @return The OS account instances.
@@ -1149,18 +1411,24 @@ public List<OsAccountInstance> getOsAccountInstances(List<Long> instanceIDs) thr
 	 */
 	private List<OsAccountInstance> getOsAccountInstances(String whereClause) throws TskCoreException {
 		List<OsAccountInstance> osAcctInstances = new ArrayList<>();
-		String querySQL = "SELECT * FROM tsk_os_account_instances WHERE " + whereClause;
+
+		String querySQL
+				= "SELECT tsk_os_account_instances.* "
+				+ " FROM tsk_os_account_instances "
+				+ " INNER JOIN ( SELECT os_account_obj_id,  data_source_obj_id, MIN(instance_type) AS min_instance_type "
+				+ "					FROM tsk_os_account_instances"
+				+ "					GROUP BY os_account_obj_id, data_source_obj_id ) grouped_instances "
+				+ " ON tsk_os_account_instances.os_account_obj_id = grouped_instances.os_account_obj_id "
+				+ " AND tsk_os_account_instances.instance_type = grouped_instances.min_instance_type "
+				+ " WHERE " + whereClause;
+
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = db.getConnection();
 				PreparedStatement preparedStatement = connection.getPreparedStatement(querySQL, Statement.NO_GENERATED_KEYS);
 				ResultSet results = connection.executeQuery(preparedStatement)) {
-			while (results.next()) {
-				long instanceId = results.getLong("id");
-				long osAccountObjID = results.getLong("os_account_obj_id");
-				long dataSourceObjId = results.getLong("data_source_obj_id");
-				int instanceType = results.getInt("instance_type");
-				osAcctInstances.add(new OsAccountInstance(db, instanceId, osAccountObjID, dataSourceObjId, OsAccountInstance.OsAccountInstanceType.fromID(instanceType)));
-			}
+
+			osAcctInstances = getOsAccountInstancesFromResultSet(results);
+
 		} catch (SQLException ex) {
 			throw new TskCoreException("Failed to get OsAccountInstances (SQL = " + querySQL + ")", ex);
 		} finally {
@@ -1169,6 +1437,29 @@ private List<OsAccountInstance> getOsAccountInstances(String whereClause) throws
 		return osAcctInstances;
 	}
 
+	/**
+	 * Returns list of OS account instances from the given result set.
+	 *
+	 * @param results Result set from a SELECT tsk_os_account_instances.* query.
+	 *
+	 * @return List of OS account instances.
+	 *
+	 * @throws SQLException
+	 */
+	private List<OsAccountInstance> getOsAccountInstancesFromResultSet(ResultSet results) throws SQLException {
+
+		List<OsAccountInstance> osAcctInstances = new ArrayList<>();
+		while (results.next()) {
+			long instanceId = results.getLong("id");
+			long osAccountObjID = results.getLong("os_account_obj_id");
+			long dataSourceObjId = results.getLong("data_source_obj_id");
+			int instanceType = results.getInt("instance_type");
+			osAcctInstances.add(new OsAccountInstance(db, instanceId, osAccountObjID, dataSourceObjId, OsAccountInstance.OsAccountInstanceType.fromID(instanceType)));
+		}
+
+		return osAcctInstances;
+	}
+
 	/**
 	 * Updates the properties of the specified account in the database.
 	 *
@@ -1232,12 +1523,12 @@ OsAccountUpdateResult updateStandardOsAccountAttributes(OsAccount osAccount, Str
 			}
 
 			if (Objects.nonNull(accountType)) {
-				updateAccountColumn(osAccount.getId(), "type", accountType, connection);
+				updateAccountColumn(osAccount.getId(), "type", accountType.getId(), connection);
 				updateStatusCode = OsAccountUpdateStatus.UPDATED;
 			}
 
 			if (Objects.nonNull(accountStatus)) {
-				updateAccountColumn(osAccount.getId(), "status", accountStatus, connection);
+				updateAccountColumn(osAccount.getId(), "status", accountStatus.getId(), connection);
 				updateStatusCode = OsAccountUpdateStatus.UPDATED;
 			}
 
@@ -1393,13 +1684,49 @@ public OsAccountUpdateResult updateCoreWindowsOsAccountAttributes(OsAccount osAc
 	private OsAccountUpdateResult updateCoreWindowsOsAccountAttributes(OsAccount osAccount, String accountSid, String loginName, String realmName, Host referringHost, CaseDbTransaction trans) throws TskCoreException, NotUserSIDException {
 
 		// first get and update the realm - if we have the info to find the realm
-		if (!StringUtils.isBlank(accountSid) || !StringUtils.isBlank(realmName)) {
-			db.getOsAccountRealmManager().getAndUpdateWindowsRealm(accountSid, realmName, referringHost, trans.getConnection());
+		
+		if ((!StringUtils.isBlank(accountSid) && !accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) || !StringUtils.isBlank(realmName)) {
+			// If the SID is a well known SID, ensure we use the well known english name
+			String resolvedRealmName = WindowsAccountUtils.toWellknownEnglishRealmName(realmName);
+			
+			
+			OsRealmUpdateResult realmUpdateResult = db.getOsAccountRealmManager().getAndUpdateWindowsRealm(accountSid, resolvedRealmName, referringHost, trans.getConnection());
+			
+			
+			Optional<OsAccountRealm> realmOptional = realmUpdateResult.getUpdatedRealm();
+
+			if (realmOptional.isPresent()) {
+				
+				if (realmUpdateResult.getUpdateStatus() == OsRealmUpdateStatus.UPDATED) {
+
+					// Check if update of the realm triggers a merge with any other realm, 
+					// say another realm with same name but no SID, or same SID but no name
+					//1. Check if there is any OTHER realm with the same name, same host but no addr
+					Optional<OsAccountRealm> anotherRealmWithSameName = db.getOsAccountRealmManager().getAnotherRealmByName(realmOptional.get(), realmName, referringHost, trans.getConnection());
+
+					// 2. Check if there is any OTHER realm with same addr and host, but NO name
+					Optional<OsAccountRealm> anotherRealmWithSameAddr = db.getOsAccountRealmManager().getAnotherRealmByAddr(realmOptional.get(), realmName, referringHost, trans.getConnection());
+
+					if (anotherRealmWithSameName.isPresent()) {
+						db.getOsAccountRealmManager().mergeRealms(anotherRealmWithSameName.get(), realmOptional.get(), trans);
+					}
+					if (anotherRealmWithSameAddr.isPresent()) {
+						db.getOsAccountRealmManager().mergeRealms(anotherRealmWithSameAddr.get(), realmOptional.get(), trans);
+					}
+				}
+			}
 		}
 
 		// now update the account core data
-		OsAccountUpdateResult updateStatus = this.updateOsAccountCore(osAccount, accountSid, loginName, trans);
+		String resolvedLoginName = WindowsAccountUtils.toWellknownEnglishLoginName(loginName);
+		OsAccountUpdateResult updateStatus = this.updateOsAccountCore(osAccount, accountSid, resolvedLoginName, trans);
 
+		Optional<OsAccount> updatedAccount = updateStatus.getUpdatedAccount();
+		if (updatedAccount.isPresent()) {
+			// After updating account data, check if there is matching account to merge
+			mergeOsAccount(updatedAccount.get(), trans);
+		}
+		
 		return updateStatus;
 	}
 
@@ -1433,17 +1760,12 @@ private OsAccountUpdateResult updateOsAccountCore(OsAccount osAccount, String ad
 		try {
 			CaseDbConnection connection = trans.getConnection();
 
-			// if a new addr is provided and the account already has an address, and they are not the same, throw an exception
-			if (!StringUtils.isBlank(address) && !StringUtils.isBlank(osAccount.getAddr().orElse(null)) && !address.equalsIgnoreCase(osAccount.getAddr().orElse(""))) {
+			// if a new non-null addr is provided and the account already has an address, and they are not the same, throw an exception
+			if (!StringUtils.isBlank(address) && !address.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) && !StringUtils.isBlank(osAccount.getAddr().orElse(null)) && !address.equalsIgnoreCase(osAccount.getAddr().orElse(""))) {
 				throw new TskCoreException(String.format("Account (%d) already has an address (%s), address cannot be updated.", osAccount.getId(), osAccount.getAddr().orElse("NULL")));
 			}
 
-			// if a new login name is provided and the account already has a loginname and they are not the same, throw an exception
-			if (!StringUtils.isBlank(loginName) && !StringUtils.isBlank(osAccount.getLoginName().orElse(null)) && !loginName.equalsIgnoreCase(osAccount.getLoginName().orElse(""))) {
-				throw new TskCoreException(String.format("Account (%d) already has a login name (%s), login name cannot be updated.", osAccount.getId(), osAccount.getLoginName().orElse("NULL")));
-			}
-
-			if (StringUtils.isBlank(osAccount.getAddr().orElse(null)) && !StringUtils.isBlank(address)) {
+			if (StringUtils.isBlank(osAccount.getAddr().orElse(null)) && !StringUtils.isBlank(address) && !address.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) {
 				updateAccountColumn(osAccount.getId(), "addr", address, connection);
 				updateStatusCode = OsAccountUpdateStatus.UPDATED;
 			}
@@ -1647,4 +1969,61 @@ public Optional<OsAccount> getUpdatedAccount() {
 			return Optional.ofNullable(updatedAccount);
 		}
 	}
+
+	/**
+	 * Represents the osAccountId\dataSourceId pair for use with the cache of
+	 * OsAccountInstances.
+	 */
+	private class OsAccountInstanceKey implements Comparable<OsAccountInstanceKey>{
+
+		private final long osAccountId;
+		private final long dataSourceId;
+
+		OsAccountInstanceKey(long osAccountId, long dataSourceId) {
+			this.osAccountId = osAccountId;
+			this.dataSourceId = dataSourceId;
+		}
+
+		@Override
+		public boolean equals(Object other) {
+			if (this == other) {
+				return true;
+			}
+			if (other == null) {
+				return false;
+			}
+			if (getClass() != other.getClass()) {
+				return false;
+			}
+
+			final OsAccountInstanceKey otherKey = (OsAccountInstanceKey) other;
+
+			if (osAccountId != otherKey.osAccountId) {
+				return false;
+			}
+
+			return dataSourceId == otherKey.dataSourceId;
+		}
+
+		@Override
+		public int hashCode() {
+			int hash = 5;
+			hash = 53 * hash + (int) (this.osAccountId ^ (this.osAccountId >>> 32));
+			hash = 53 * hash + (int) (this.dataSourceId ^ (this.dataSourceId >>> 32));
+			return hash;
+		}
+
+		@Override
+		public int compareTo(OsAccountInstanceKey other) {
+			if(this.equals(other)) {
+				return 0;
+			}
+			
+			if (dataSourceId != other.dataSourceId) {
+				return Long.compare(dataSourceId, other.dataSourceId);
+			}
+
+			return Long.compare(osAccountId, other.osAccountId);
+		}
+	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealm.java b/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealm.java
index 79995728adb0cba555a38fe9f0382706839f745f..54b827da50cf02ba72b73d924364b2c25a55c764 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealm.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealm.java
@@ -47,7 +47,7 @@ public final class OsAccountRealm {
 	private final String realmName; // realm name
 	
 	private final String realmAddr; // realm address
-	private String signature; // either realm address or name (if address is not known)
+	private String signature; // either realm address or name (if address is not known), plus a scope indicator
 	private final Host host;	// if the realm consists of a single host.  Will be null if the realm is domain scoped. 
 	private final ScopeConfidence scopeConfidence; // confidence in realm scope.
 	private final RealmDbStatus dbStatus; // Status of row in database.
@@ -59,7 +59,7 @@ public final class OsAccountRealm {
 	 * @param realmName       Realm name, may be null.
 	 * @param realmAddr       Unique numeric address for realm, may be null only
 	 *                        if realm name is not null.
-	 * @param signature       Either the address or the name.
+	 * @param signature       Either the address or the name, plus a scope indicator.
 	 * @param host            Host if the realm is host scoped.
 	 * @param scopeConfidence Scope confidence.
 	 */
diff --git a/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealmManager.java b/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealmManager.java
index f3b3751415e00671e658fad7adef03e87590109c..959eb1cc8ab130a5b314e89d694bc59f5a0341fd 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealmManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/OsAccountRealmManager.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2020-2021 Basis Technology Corp.
+ * Copyright 2020-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -82,7 +82,8 @@ public OsAccountRealm newWindowsRealm(String accountSid, String realmName, Host
 		if (referringHost == null) {
 			throw new TskCoreException("A referring host is required to create a realm.");
 		}
-		if (StringUtils.isBlank(accountSid) && StringUtils.isBlank(realmName)) {
+		if ((StringUtils.isBlank(accountSid) || accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) 
+			&& StringUtils.isBlank(realmName)) {
 			throw new TskCoreException("Either an address or a name is required to create a realm.");
 		}
 		
@@ -101,6 +102,7 @@ public OsAccountRealm newWindowsRealm(String accountSid, String realmName, Host
 
 			case UNKNOWN:
 			default:
+				// NOTE: if there's a well known SID, the scope will be changed to LOCAL later. 
 				// check if the referring host already has a realm
 				boolean isHostRealmKnown = isHostRealmKnown(referringHost);
 				if (isHostRealmKnown) {
@@ -116,7 +118,8 @@ public OsAccountRealm newWindowsRealm(String accountSid, String realmName, Host
 		
 		// get windows realm address from sid
 		String realmAddr = null;
-		if (!Strings.isNullOrEmpty(accountSid)) {
+		String resolvedRealmName = WindowsAccountUtils.toWellknownEnglishRealmName(realmName);
+		if (!Strings.isNullOrEmpty(accountSid) && !accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) {
 			
 			if (!WindowsAccountUtils.isWindowsUserSid(accountSid)) {
 				throw new OsAccountManager.NotUserSIDException(String.format("SID = %s is not a user SID.", accountSid ));
@@ -124,17 +127,25 @@ public OsAccountRealm newWindowsRealm(String accountSid, String realmName, Host
 			
 			realmAddr = WindowsAccountUtils.getWindowsRealmAddress(accountSid);
 			
-			// if the account is special windows account, create a local realm for it.
-			if (realmAddr.equals(WindowsAccountUtils.SPECIAL_WINDOWS_REALM_ADDR)) {
+			
+			if (WindowsAccountUtils.isWindowsWellKnownSid(accountSid)) {
+				
+				// if the sid is a Windows well known SID, create a local realm for it.
 				scopeHost = referringHost;
 				scopeConfidence = OsAccountRealm.ScopeConfidence.KNOWN;
+				
+				// if the SID is a Windows well known SID, then prefer to use the default well known name to create the realm 
+				String wellKnownRealmName = WindowsAccountUtils.getWindowsWellKnownSidRealmName(accountSid);
+				if (!StringUtils.isEmpty(wellKnownRealmName)) {
+					resolvedRealmName = wellKnownRealmName;
+				}
 			}
 		}
 		
-		String signature = makeRealmSignature(realmAddr, realmName, scopeHost);
+		String signature = makeRealmSignature(realmAddr, resolvedRealmName, scopeHost);
 		
 		// create a realm
-		return newRealm(realmName, realmAddr, signature, scopeHost, scopeConfidence);
+		return newRealm(resolvedRealmName, realmAddr, signature, scopeHost, scopeConfidence);
 	}
 	
 	/**
@@ -161,7 +172,8 @@ public Optional<OsAccountRealm> getWindowsRealm(String accountSid, String realmN
 		}
 		
 		// need at least one of the two, the addr or name to look up
-		if (Strings.isNullOrEmpty(accountSid) && Strings.isNullOrEmpty(realmName)) {
+		if ((Strings.isNullOrEmpty(accountSid) || accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID) )
+				&& Strings.isNullOrEmpty(realmName)) {
 			throw new TskCoreException("Realm address or name is required get a realm.");
 		}
 		
@@ -192,12 +204,13 @@ Optional<OsAccountRealm> getWindowsRealm(String accountSid, String realmName, Ho
 		}
 		
 		// need at least one of the two, the addr or name to look up
-		if (StringUtils.isBlank(accountSid) && StringUtils.isBlank(realmName)) {
+		if ((StringUtils.isBlank(accountSid) || accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) 
+				&& StringUtils.isBlank(realmName)) {
 			throw new TskCoreException("Realm address or name is required get a realm.");
 		}
 		
-		// If an accountSID is provided search for realm by addr.
-		if (!Strings.isNullOrEmpty(accountSid)) {
+		// If a non null accountSID is provided search for realm by addr.
+		if (!Strings.isNullOrEmpty(accountSid) && !accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) {
 			
 			if (!WindowsAccountUtils.isWindowsUserSid(accountSid)) {
 				throw new OsAccountManager.NotUserSIDException(String.format("SID = %s is not a user SID.", accountSid ));
@@ -210,10 +223,13 @@ Optional<OsAccountRealm> getWindowsRealm(String accountSid, String realmName, Ho
 			}
 		}
 
+		// ensure we are using English names for any well known SIDs. 
+		String resolvedRealmName = WindowsAccountUtils.toWellknownEnglishRealmName(realmName);
+		
 		// No realm addr so search by name.
-		Optional<OsAccountRealm> realm = getRealmByName(realmName, referringHost, connection);
-		if (realm.isPresent() && !Strings.isNullOrEmpty(accountSid)) {
-			// If we were given an accountSID, make sure there isn't one set on the matching realm.
+		Optional<OsAccountRealm> realm = getRealmByName(resolvedRealmName, referringHost, connection);
+		if (realm.isPresent() && !Strings.isNullOrEmpty(accountSid) && !accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) {
+			// If we were given a non-null accountSID, make sure there isn't one set on the matching realm.
 			// We know it won't match because the previous search by SID failed.
 			if (realm.get().getRealmAddr().isPresent()) {
 				return Optional.empty();
@@ -237,28 +253,26 @@ Optional<OsAccountRealm> getWindowsRealm(String accountSid, String realmName, Ho
 	 * @param referringHost Referring Host.
 	 * @param connection    Database connection to use.
 	 *
-	 * @return Optional with OsAccountRealm, Optional.empty if no matching realm
-	 *         is found.
+	 * @return OsRealmUpdateResult account update result. 
 	 *
 	 * @throws TskCoreException
 	 */
-	Optional<OsAccountRealm> getAndUpdateWindowsRealm(String accountSid, String realmName, Host referringHost, CaseDbConnection connection) throws TskCoreException, OsAccountManager.NotUserSIDException {
+	OsRealmUpdateResult getAndUpdateWindowsRealm(String accountSid, String realmName, Host referringHost, CaseDbConnection connection) throws TskCoreException, OsAccountManager.NotUserSIDException {
 		
 		// get realm
-		Optional<OsAccountRealm> realmOptional =  getWindowsRealm(accountSid, realmName, referringHost, connection );
-		
+		Optional<OsAccountRealm> realmOptional = getWindowsRealm(accountSid, realmName, referringHost, connection);
+
 		// if found, update it if needed
 		if (realmOptional.isPresent()) {
-			String realmAddr = StringUtils.isNotBlank(accountSid) ? WindowsAccountUtils.getWindowsRealmAddress(accountSid) : null;
+			String realmAddr = (StringUtils.isNotBlank(accountSid) && !accountSid.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID)) ? WindowsAccountUtils.getWindowsRealmAddress(accountSid) : null;
 			OsRealmUpdateResult realmUpdateResult = updateRealm(realmOptional.get(), realmAddr, realmName, connection);
-			
-			// if realm was updated, return the updated realm
-			if (realmUpdateResult.getUpdateStatus() == OsRealmUpdateStatus.UPDATED) {
-				return realmUpdateResult.getUpdatedRealm();
-			} 
-		} 
-		
-		return realmOptional; // return the found realm as is, if any
+
+			return realmUpdateResult;
+
+		} else {
+			return new OsRealmUpdateResult(OsRealmUpdateStatus.NO_CHANGE, null);
+		}
+
 	}
 	
 	
@@ -292,6 +306,9 @@ public OsRealmUpdateResult updateRealm(OsAccountRealm realm, String realmAddr, S
 	/**
 	 * Updates the realm address and/or name, if a non blank address/name is
 	 * specified and the current address/name is blank.
+	 * 
+	 * The realm name will not be updated regardless of the value in realmName
+	 * if the passed in realm has an address equal to SPECIAL_WINDOWS_REALM_ADDR.
 	 *
 	 * @param realm      Realm to update.
 	 * @param realmAddr  Realm address, may be null if the address doesn't need
@@ -308,7 +325,8 @@ public OsRealmUpdateResult updateRealm(OsAccountRealm realm, String realmAddr, S
 	private OsRealmUpdateResult updateRealm(OsAccountRealm realm, String realmAddr, String realmName, CaseDbConnection connection) throws TskCoreException {
 
 		// need at least one of the two
-		if (StringUtils.isBlank(realmAddr) && StringUtils.isBlank(realmName)) {
+		if ( (StringUtils.isBlank(realmAddr) || realmAddr.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID))  
+				&& StringUtils.isBlank(realmName)) {
 			throw new TskCoreException("Realm address or name is required to update realm.");
 		}
 
@@ -317,21 +335,26 @@ private OsRealmUpdateResult updateRealm(OsAccountRealm realm, String realmAddr,
 
 		db.acquireSingleUserCaseWriteLock();
 		try {
-			List<String> realmNames = realm.getRealmNames();
-			String currRealmName = realmNames.isEmpty() ? null : realmNames.get(0);	// currently there is only one name.
 			String currRealmAddr = realm.getRealmAddr().orElse(null);
 
 			// set name and address to new values only if the current value is blank and the new value isn't.		
-			if ((StringUtils.isBlank(currRealmAddr) && StringUtils.isNotBlank(realmAddr))) {
+			if ((StringUtils.isBlank(currRealmAddr) && StringUtils.isNotBlank(realmAddr) && !realmAddr.equalsIgnoreCase(WindowsAccountUtils.WINDOWS_NULL_SID))) {
 				updateRealmColumn(realm.getRealmId(), "realm_addr", realmAddr, connection);
+				currRealmAddr = realmAddr;
 				updateStatusCode = OsRealmUpdateStatus.UPDATED;
 			}
-
+			
+			List<String> realmNames = realm.getRealmNames();
+			String currRealmName = realmNames.isEmpty() ? null : realmNames.get(0);	// currently there is only one name.
+			
+			// Update realm name if:
+			//	 Current realm name is empty
+			//	 The passed in realm name is not empty
 			if (StringUtils.isBlank(currRealmName) && StringUtils.isNotBlank(realmName)) {
 				updateRealmColumn(realm.getRealmId(), "realm_name", realmName, connection);
 				updateStatusCode = OsRealmUpdateStatus.UPDATED;
 			}
-
+			
 			// if nothing is to be changed, return
 			if (updateStatusCode == OsRealmUpdateStatus.NO_CHANGE) {
 				return new OsRealmUpdateResult(updateStatusCode, realm);
@@ -493,34 +516,36 @@ Optional<OsAccountRealm> getRealmByAddr(String realmAddr, Host host, CaseDbConne
 				        + " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId()
 						+ " ORDER BY realms.scope_host_id IS NOT NULL, realms.scope_host_id";	// ensure that non null host_id is at the front
 				    
-		db.acquireSingleUserCaseReadLock();
-		try (	Statement s = connection.createStatement();
-				ResultSet rs = connection.executeQuery(s, queryString)) {
-
-			OsAccountRealm accountRealm = null;
-			if (rs.next()) {
-				Host realmHost = null;
-				long hostId = rs.getLong("scope_host_id");
-				if (!rs.wasNull()) {
-					if (host != null ) {
-						realmHost = host; // exact match on given host
-					} else {
-						realmHost = new Host(hostId, rs.getString("host_name"));
-					}
-				}
-				
-				accountRealm = new OsAccountRealm(rs.getLong("realm_id"), rs.getString("realm_name"), 
-												rs.getString("realm_addr"), rs.getString("realm_signature"), 
-												realmHost, ScopeConfidence.fromID(rs.getInt("scope_confidence")),
-												OsAccountRealm.RealmDbStatus.fromID(rs.getInt("db_status")));
-			} 
-			return Optional.ofNullable(accountRealm);
-		} catch (SQLException ex) {
-			throw new TskCoreException(String.format("Error running the realms query = %s with realmaddr = %s and host name = %s",
-					queryString, realmAddr, (host != null ? host.getName() : "Null")), ex);
-		} finally {
-			db.releaseSingleUserCaseReadLock();
-		}
+		return getRealmUsingQuery(queryString, host,  connection);
+	}
+	
+	/**
+	 * Get another realm with the given addr
+	 * that's different from the specified realm.
+	 * 
+	 * @param realm A known realm, the returned realm should be different from this. 
+	 * @param realmAddr Realm address to match.
+	 * @param host Host for realm, may be null.
+	 * @param connection Database connection to use.
+	 * @return Optional with OsAccountRealm, Optional.empty if no realm found with matching realm address.
+	 * 
+	 * @throws TskCoreException.
+	 */
+	Optional<OsAccountRealm> getAnotherRealmByAddr(OsAccountRealm realm, String realmAddr, Host host, CaseDbConnection connection) throws TskCoreException {
+			
+		// If the given realm has a host id, then the other realm should have the same host id
+		// If the given realm has no host id,  then the other realm should have no host id
+		String whereHostClause = realm.getScopeHost().isPresent() 
+							? " ( realms.scope_host_id = " + realm.getScopeHost().get().getHostId() + " ) " 
+							: " realms.scope_host_id IS NULL ";
+		String queryString = REALM_QUERY_STRING
+						+ " WHERE LOWER(realms.realm_addr) = LOWER('"+ realmAddr + "') "
+						+ " AND " + whereHostClause
+						+ " AND realms.id <> " + realm.getRealmId()
+				        + " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId()
+						+ " ORDER BY realms.scope_host_id IS NOT NULL, realms.scope_host_id";	// ensure that non null host_id is at the front
+				    
+		return getRealmUsingQuery(queryString, host,  connection);
 	}
 	
 	/**
@@ -546,6 +571,52 @@ Optional<OsAccountRealm> getRealmByName(String realmName, Host host, CaseDbConne
 				+ " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId()
 				+ " ORDER BY realms.scope_host_id IS NOT NULL, realms.scope_host_id";	// ensure that non null host_id are at the front
 
+		return getRealmUsingQuery(queryString, host,  connection);
+	}
+	
+	/**
+	 * Get another realm with the given name
+	 * that's different from the specified realm.
+	 * 
+	 * @param realm A known realm, the returned realm should be different from this. 
+	 * @param realmName Realm name.
+	 * @param host Host for realm, may be null.
+	 * @param connection Database connection to use.
+	 * 
+	 * @return Optional with OsAccountRealm, Optional.empty if no matching realm is found.
+	 * @throws TskCoreException.
+	 */
+	Optional<OsAccountRealm> getAnotherRealmByName(OsAccountRealm realm, String realmName, Host host, CaseDbConnection connection) throws TskCoreException {
+		
+		// If the given realm has a host id, then the other realm should have the same host id
+		// If the given realm has no host id,  then the other realm should have no host id
+		String whereHostClause = realm.getScopeHost().isPresent()
+							? " ( realms.scope_host_id = " + realm.getScopeHost().get().getHostId() + " ) " 
+							: " realms.scope_host_id IS NULL ";
+		String queryString = REALM_QUERY_STRING
+				+ " WHERE LOWER(realms.realm_name) = LOWER('" + realmName + "')"
+				+ " AND " + whereHostClause
+				+ " AND realms.id <> " + realm.getRealmId()
+				+ " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId()
+				+ " ORDER BY realms.scope_host_id IS NOT NULL, realms.scope_host_id";	// ensure that non null host_id are at the front
+
+		return getRealmUsingQuery(queryString, host,  connection);
+		
+	}
+	
+	/**
+	 * Get the realm using the given realm query. 
+	 * 
+	 * @param queryString Query string
+	 * 
+	 * @param host Host for realm, may be null.
+	 * @param connection Database connection to use.
+	 * 
+	 * @return Optional with OsAccountRealm, Optional.empty if no matching realm is found.
+	 * @throws TskCoreException 
+	 */
+	private Optional<OsAccountRealm> getRealmUsingQuery(String queryString,  Host host, CaseDbConnection connection) throws TskCoreException {
+		
 		db.acquireSingleUserCaseReadLock();
 		try (Statement s = connection.createStatement();
 				ResultSet rs = connection.executeQuery(s, queryString)) {
@@ -570,30 +641,34 @@ Optional<OsAccountRealm> getRealmByName(String realmName, Host host, CaseDbConne
 			} 
 			return Optional.ofNullable(accountRealm);
 		} catch (SQLException ex) {
-			throw new TskCoreException(String.format("Error getting account realm for with name = %s", realmName), ex);
+			throw new TskCoreException(String.format("Error getting realm using query = %s", queryString), ex);
 		} finally {
 			db.releaseSingleUserCaseReadLock();
 		}
 	}
 	
 	/**
-	 * Check is there is any realm with a host-scope and KNOWN confidence for the given host.  
+	 * Check if there is any realm with a host-scope and KNOWN confidence for the given host.  
 	 * If we can assume that a host will have only a single host-scoped realm, then you can 
 	 * assume a new realm is domain-scoped when this method returns true.  I.e. once we know
 	 * the host-scoped realm, then everything else is domain-scoped. 
 	 * 
+	 * NOTE: a host may now have several local realms for Windows well known SIDs.  
+	 *       The above assumption only holds for a non well known SID. 
+	 *       Caller must take the account SID into consideration when using this method. 
+	 * 
 	 * @param host Host for which to look for a realm.
 	 * 
 	 * @return True if there exists a a realm with the host scope matching the host. False otherwise
 	 */
+	
 	private boolean isHostRealmKnown(Host host) throws TskCoreException {
 	
 		// check if this host has a local known realm aleady, other than the special windows realm.
 		String queryString = REALM_QUERY_STRING
 				+ " WHERE realms.scope_host_id = " + host.getHostId()
 				+ " AND realms.scope_confidence = " + OsAccountRealm.ScopeConfidence.KNOWN.getId()
-				+ " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId()
-				+ " AND LOWER(realms.realm_addr) <> LOWER('"+ WindowsAccountUtils.SPECIAL_WINDOWS_REALM_ADDR + "') ";
+				+ " AND realms.db_status = " + OsAccountRealm.RealmDbStatus.ACTIVE.getId();
 
 		db.acquireSingleUserCaseReadLock();
 		try (CaseDbConnection connection = this.db.getConnection();
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Pool.java b/bindings/java/src/org/sleuthkit/datamodel/Pool.java
index 2a6e712e73b78ab596db12640e3b52d01e6f2c2e..98681759fd86791e7152ca2de1a0247113d7b737 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Pool.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Pool.java
@@ -114,7 +114,13 @@ private long getPoolOffset(Image image) throws TskCoreException {
 		} else if (this.getParent() instanceof Volume) {
 			// If the parent is a volume, then the pool starts at the volume offset
 			Volume parent = (Volume)this.getParent();
-			return parent.getStart() * image.getSsize(); // Offset needs to be in bytes
+			if (parent.getParent() instanceof VolumeSystem) {
+				// uses block size from parent volume system
+				return parent.getStart() * ((VolumeSystem) parent.getParent()).getBlockSize(); // Offset needs to be in bytes
+			} else {
+				// uses sector size from parent image (old behavior fallback)
+				return parent.getStart() * image.getSsize(); // Offset needs to be in bytes
+			}
 		}
 		throw new TskCoreException("Pool with object ID " + this.getId() + " does not have Image or Volume parent");
 	}
@@ -124,6 +130,7 @@ public void close() {
 		// Pools will be closed during case closing by the JNI code.
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	protected void finalize() throws Throwable {
 		try {
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Report.java b/bindings/java/src/org/sleuthkit/datamodel/Report.java
index 51d6c930e75826befbcb324e33bd120d0f145d7a..afc07dfacab807eb7cb53bf95444dc33279e396a 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Report.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Report.java
@@ -254,9 +254,12 @@ public BlackboardArtifact newArtifact(int artifactTypeID) throws TskCoreExceptio
 
 	@Override
 	public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactType, Score score, String conclusion, String configuration, String justification, Collection<BlackboardAttribute> attributesList) throws TskCoreException {
+		// Get the data source before opening the transaction
+		long dataSourceObjId = getDataSource().getId();
+		
 		CaseDbTransaction trans = db.beginTransaction();
 		try {
-			AnalysisResultAdded resultAdded = db.getBlackboard().newAnalysisResult(artifactType, objectId, this.getDataSource().getId(), score, conclusion, configuration, justification, attributesList, trans);
+			AnalysisResultAdded resultAdded = db.getBlackboard().newAnalysisResult(artifactType, objectId, dataSourceObjId, score, conclusion, configuration, justification, attributesList, trans);
 
 			trans.commit();
 			return resultAdded;
@@ -283,7 +286,8 @@ public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactTyp
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId) throws TskCoreException {
 
-		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
+		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() &&
+				artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
 			throw new TskCoreException("Reports can only have keyword hit artifacts.");
 		}
 		
@@ -293,10 +297,10 @@ public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collec
 	@Override
 	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId, long dataSourceId) throws TskCoreException {
 
-		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
+		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() &&
+				artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
 			throw new TskCoreException("Reports can only have keyword hit artifacts.");
-		}
-		
+		}		
 		return db.getBlackboard().newDataArtifact(artifactType, objectId, dataSourceId, attributesList, osAccountId);
 	}
 
@@ -314,7 +318,7 @@ public BlackboardArtifact newArtifact(BlackboardArtifact.ARTIFACT_TYPE type) thr
 
 	@Override
 	public ArrayList<BlackboardArtifact> getArtifacts(String artifactTypeName) throws TskCoreException {
-		return getArtifacts(db.getArtifactType(artifactTypeName).getTypeID());
+		return getArtifacts(db.getBlackboard().getArtifactType(artifactTypeName).getTypeID());
 	}
 
 	@Override
@@ -380,7 +384,7 @@ public Set<String> getHashSetNames() throws TskCoreException {
 
 	@Override
 	public long getArtifactsCount(String artifactTypeName) throws TskCoreException {
-		return getArtifactsCount(db.getArtifactType(artifactTypeName).getTypeID());
+		return getArtifactsCount(db.getBlackboard().getArtifactType(artifactTypeName).getTypeID());
 	}
 
 	@Override
diff --git a/bindings/java/src/org/sleuthkit/datamodel/SlackFile.java b/bindings/java/src/org/sleuthkit/datamodel/SlackFile.java
index 8cf9407e5c3209787c76fbf27684b59824041c33..f53f63aa79748cf033fa0c667342ccf031e6504f 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/SlackFile.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/SlackFile.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -70,6 +70,8 @@ public class SlackFile extends FsContent {
 	 * @param gid                The GID for the file.
 	 * @param md5Hash            The MD5 hash of the file, null if not yet
 	 *                           calculated.
+	 * @param sha256Hash         sha256 hash of the file, or null if not present
+	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
 	 * @param knownState         The known state of the file from a hash
 	 *                           database lookup, null if not yet looked up.
 	 * @param parentPath         The path of the parent of the file.
@@ -94,11 +96,12 @@ public class SlackFile extends FsContent {
 			long size,
 			long ctime, long crtime, long atime, long mtime,
 			short modes, int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState, String parentPath, String mimeType,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState, String parentPath, String mimeType,
 			String extension,
 			String ownerUid,
 			Long osAccountObjId) {
-		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.SLACK, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, Collections.emptyList());
+		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.SLACK, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 	}
 
 	/**
@@ -114,7 +117,7 @@ public class SlackFile extends FsContent {
 	 */
 	@Override
 	@SuppressWarnings("deprecation")
-	protected int readInt(byte[] buf, long offset, long len) throws TskCoreException {
+	protected synchronized int readInt(byte[] buf, long offset, long len) throws TskCoreException {
 		if (offset == 0 && size == 0) {
 			//special case for 0-size file
 			return 0;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/SleuthkitCase.java b/bindings/java/src/org/sleuthkit/datamodel/SleuthkitCase.java
index 32326ed21baf4cf23831a6bf61519d889e7375bd..5cbde51eccbbba7ca93bb8d4c8546e867ddfea01 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/SleuthkitCase.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/SleuthkitCase.java
@@ -18,6 +18,7 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.annotations.Beta;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.ImmutableSet;
@@ -51,6 +52,8 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.Date;
 import java.util.EnumMap;
 import java.util.HashMap;
@@ -64,7 +67,6 @@
 import java.util.ResourceBundle;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.logging.Level;
@@ -72,6 +74,7 @@
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.postgresql.util.PSQLState;
+import org.sleuthkit.datamodel.Blackboard.BlackboardException;
 import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
 import org.sleuthkit.datamodel.BlackboardArtifact.Category;
 import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
@@ -79,6 +82,7 @@
 import org.sleuthkit.datamodel.IngestJobInfo.IngestJobStatusType;
 import org.sleuthkit.datamodel.IngestModuleInfo.IngestModuleType;
 import org.sleuthkit.datamodel.SleuthkitJNI.CaseDbHandle.AddImageProcess;
+import org.sleuthkit.datamodel.TimelineManager.TimelineEventAddedEvent;
 import org.sleuthkit.datamodel.TskData.DbType;
 import org.sleuthkit.datamodel.TskData.FileKnown;
 import org.sleuthkit.datamodel.TskData.ObjectType;
@@ -100,12 +104,8 @@ public class SleuthkitCase {
 
 	private static final int MAX_DB_NAME_LEN_BEFORE_TIMESTAMP = 47;
 
-	/**
-	 * This must be the same as TSK_SCHEMA_VER and TSK_SCHEMA_MINOR_VER in
-	 * tsk/auto/tsk_db.h.
-	 */
 	static final CaseDbSchemaVersionNumber CURRENT_DB_SCHEMA_VERSION
-			= new CaseDbSchemaVersionNumber(9, 1);
+			= new CaseDbSchemaVersionNumber(9, 4);
 
 	private static final long BASE_ARTIFACT_ID = Long.MIN_VALUE; // Artifact ids will start at the lowest negative value
 	private static final Logger logger = Logger.getLogger(SleuthkitCase.class.getName());
@@ -117,7 +117,6 @@ public class SleuthkitCase {
 	private static final String SQL_ERROR_RESOURCE_GROUP = "53";
 	private static final String SQL_ERROR_LIMIT_GROUP = "54";
 	private static final String SQL_ERROR_INTERNAL_GROUP = "xx";
-	private static final int MIN_USER_DEFINED_TYPE_ID = 10000;
 
 	private static final Set<String> CORE_TABLE_NAMES = ImmutableSet.of(
 			"tsk_events",
@@ -144,6 +143,7 @@ public class SleuthkitCase {
 			"blackboard_attribute_types",
 			"data_source_info",
 			"file_encoding_types",
+			"file_collection_status_types",
 			"ingest_module_types",
 			"ingest_job_status_types",
 			"ingest_modules",
@@ -184,7 +184,8 @@ public class SleuthkitCase {
 
 	private final ConnectionPool connections;
 	private final Object carvedFileDirsLock = new Object();
-	private final Map<Long, VirtualDirectory> rootIdsToCarvedFileDirs = new HashMap<>();
+	private final static int MAX_CARVED_FILES_PER_FOLDER = 2000;
+	private final Map<Long, CarvedFileDirInfo> rootIdsToCarvedFileDirs = new HashMap<>();
 	private final Map<Long, FileSystem> fileSystemIdMap = new HashMap<>(); // Cache for file system files.
 	private final List<ErrorObserver> sleuthkitCaseErrorObservers = new ArrayList<>();
 	private final String databaseName;
@@ -194,10 +195,8 @@ public class SleuthkitCase {
 	private SleuthkitJNI.CaseDbHandle caseHandle;
 	private final String caseHandleIdentifier; // Used to identify this case in the JNI cache.
 	private String dbBackupPath;
-	private Map<Integer, BlackboardArtifact.Type> typeIdToArtifactTypeMap;
-	private Map<Integer, BlackboardAttribute.Type> typeIdToAttributeTypeMap;
-	private Map<String, BlackboardArtifact.Type> typeNameToArtifactTypeMap;
-	private Map<String, BlackboardAttribute.Type> typeNameToAttributeTypeMap;
+	private AtomicBoolean timelineEventsDisabled = new AtomicBoolean(false);
+
 	private CaseDbSchemaVersionNumber caseDBSchemaCreationVersion;
 
 	// Objects for caching the result of isRootDirectory(). Lock is for visibility only.
@@ -205,7 +204,9 @@ public class SleuthkitCase {
 	private final Map<RootDirectoryKey, Long> rootDirectoryMap = new HashMap<>();
 	private final Cache<Long, Boolean> isRootDirectoryCache
 			= CacheBuilder.newBuilder().maximumSize(200000).expireAfterAccess(5, TimeUnit.MINUTES).build();
-
+	// custom provider for file bytes (can be null)
+	private final ContentStreamProvider contentProvider;
+	
 	/*
 	 * First parameter is used to specify the SparseBitSet to use, as object IDs
 	 * can be larger than the max size of a SparseBitSet
@@ -334,10 +335,11 @@ public static void tryConnect(CaseDbConnectionInfo info) throws TskCoreException
 	 * @param caseHandle A handle to a case database object in the native code
 	 *                   SleuthKit layer.
 	 * @param dbType     The type of database we're dealing with
+	 * @param contentProvider Custom provider for file content (can be null).
 	 *
 	 * @throws Exception
 	 */
-	private SleuthkitCase(String dbPath, SleuthkitJNI.CaseDbHandle caseHandle, DbType dbType) throws Exception {
+	private SleuthkitCase(String dbPath, SleuthkitJNI.CaseDbHandle caseHandle, DbType dbType, ContentStreamProvider contentProvider) throws Exception {
 		Class.forName("org.sqlite.JDBC");
 		this.dbPath = dbPath;
 		this.dbType = dbType;
@@ -347,6 +349,7 @@ private SleuthkitCase(String dbPath, SleuthkitJNI.CaseDbHandle caseHandle, DbTyp
 		this.connections = new SQLiteConnections(dbPath);
 		this.caseHandle = caseHandle;
 		this.caseHandleIdentifier = caseHandle.getCaseDbIdentifier();
+		this.contentProvider = contentProvider;
 		init();
 		logSQLiteJDBCDriverInfo();
 	}
@@ -365,10 +368,11 @@ private SleuthkitCase(String dbPath, SleuthkitJNI.CaseDbHandle caseHandle, DbTyp
 	 * @param dbType      The type of database we're dealing with SleuthKit
 	 *                    layer.
 	 * @param caseDirPath The path to the root case directory.
+	 * @param contentProvider Custom provider for file content (can be null).
 	 *
 	 * @throws Exception
 	 */
-	private SleuthkitCase(String host, int port, String dbName, String userName, String password, SleuthkitJNI.CaseDbHandle caseHandle, String caseDirPath, DbType dbType) throws Exception {
+	private SleuthkitCase(String host, int port, String dbName, String userName, String password, SleuthkitJNI.CaseDbHandle caseHandle, String caseDirPath, DbType dbType, ContentStreamProvider contentProvider) throws Exception {
 		this.dbPath = "";
 		this.databaseName = dbName;
 		this.dbType = dbType;
@@ -376,35 +380,27 @@ private SleuthkitCase(String host, int port, String dbName, String userName, Str
 		this.connections = new PostgreSQLConnections(host, port, dbName, userName, password);
 		this.caseHandle = caseHandle;
 		this.caseHandleIdentifier = caseHandle.getCaseDbIdentifier();
+		this.contentProvider = contentProvider;
 		init();
 	}
 
 	private void init() throws Exception {
-		typeIdToArtifactTypeMap = new ConcurrentHashMap<>();
-		typeIdToAttributeTypeMap = new ConcurrentHashMap<>();
-		typeNameToArtifactTypeMap = new ConcurrentHashMap<>();
-		typeNameToAttributeTypeMap = new ConcurrentHashMap<>();
-
-		/*
-		 * The database schema must be updated before loading blackboard
-		 * artifact/attribute types
-		 */
+		blackboard = new Blackboard(this);
 		updateDatabaseSchema(null);
-		initBlackboardArtifactTypes();
-		initBlackboardAttributeTypes();
-		initNextArtifactId();
-
 		try (CaseDbConnection connection = connections.getConnection()) {
+			blackboard.initBlackboardArtifactTypes(connection);
+			blackboard.initBlackboardAttributeTypes(connection);
+			initNextArtifactId(connection);
 			initIngestModuleTypes(connection);
 			initIngestStatusTypes(connection);
 			initReviewStatuses(connection);
 			initEncodingTypes(connection);
+			initCollectedStatusTypes(connection);
 			populateHasChildrenMap(connection);
 			updateExaminers(connection);
 			initDBSchemaCreationVersion(connection);
 		}
 
-		blackboard = new Blackboard(this);
 		fileManager = new FileManager(this);
 		communicationsMgr = new CommunicationsManager(this);
 		timelineMgr = new TimelineManager(this);
@@ -417,6 +413,17 @@ private void init() throws Exception {
 		personManager = new PersonManager(this);
 		hostAddressManager = new HostAddressManager(this);
 	}
+	
+	/**
+	 * Returns the custom content provider for this case if one exists.
+	 * Otherwise, returns null.
+	 *
+	 * @return The custom content provider for this case if one exists.
+	 *         Otherwise, returns null.
+	 */
+	ContentStreamProvider getContentProvider() {
+		return this.contentProvider;
+	}
 
 	/**
 	 * Returns a set of core table names in the SleuthKit Case database.
@@ -424,7 +431,7 @@ private void init() throws Exception {
 	 * @return set of core table names
 	 */
 	static Set<String> getCoreTableNames() {
-		return CORE_TABLE_NAMES;
+		return Collections.unmodifiableSet(CORE_TABLE_NAMES);
 	}
 
 	/**
@@ -433,7 +440,7 @@ static Set<String> getCoreTableNames() {
 	 * @return set of core index names
 	 */
 	static Set<String> getCoreIndexNames() {
-		return CORE_INDEX_NAMES;
+		return Collections.unmodifiableSet(CORE_INDEX_NAMES);
 	}
 
 	/**
@@ -496,10 +503,10 @@ public CommunicationsManager getCommunicationsManager() throws TskCoreException
 	public Blackboard getBlackboard() {
 		return blackboard;
 	}
-	
+
 	/**
 	 * Gets the file manager for this case.
-	 * 
+	 *
 	 * @return The per case FileManager object.
 	 */
 	public FileManager getFileManager() {
@@ -604,99 +611,24 @@ public HostAddressManager getHostAddressManager() throws TskCoreException {
 	}
 
 	/**
-	 * Make sure the predefined artifact types are in the artifact types table.
-	 *
-	 * @throws SQLException
-	 * @throws TskCoreException
-	 */
-	private void initBlackboardArtifactTypes() throws SQLException, TskCoreException {
-		acquireSingleUserCaseWriteLock();
-		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
-			for (ARTIFACT_TYPE type : ARTIFACT_TYPE.values()) {
-				try {
-					statement.execute("INSERT INTO blackboard_artifact_types (artifact_type_id, type_name, display_name, category_type) VALUES (" + type.getTypeID() + " , '" + type.getLabel() + "', '" + type.getDisplayName() + "' , " + type.getCategory().getID() + ")"); //NON-NLS
-				} catch (SQLException ex) {
-					try (ResultSet resultSet = connection.executeQuery(statement, "SELECT COUNT(*) AS count FROM blackboard_artifact_types WHERE artifact_type_id = '" + type.getTypeID() + "'")) { //NON-NLS
-						resultSet.next();
-						if (resultSet.getLong("count") == 0) {
-							throw ex;
-						}
-					}
-				}
-				this.typeIdToArtifactTypeMap.put(type.getTypeID(), new BlackboardArtifact.Type(type));
-				this.typeNameToArtifactTypeMap.put(type.getLabel(), new BlackboardArtifact.Type(type));
-			}
-			if (dbType == DbType.POSTGRESQL) {
-				int newPrimaryKeyIndex = Collections.max(Arrays.asList(ARTIFACT_TYPE.values())).getTypeID() + 1;
-				statement.execute("ALTER SEQUENCE blackboard_artifact_types_artifact_type_id_seq RESTART WITH " + newPrimaryKeyIndex); //NON-NLS
-			}
-		} finally {
-			releaseSingleUserCaseWriteLock();
-		}
-	}
-
-	/**
-	 * Make sure the predefined artifact attribute types are in the artifact
-	 * attribute types table.
-	 *
-	 * @throws SQLException
-	 * @throws TskCoreException
-	 */
-	private void initBlackboardAttributeTypes() throws SQLException, TskCoreException {
-		acquireSingleUserCaseWriteLock();
-		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
-			for (ATTRIBUTE_TYPE type : ATTRIBUTE_TYPE.values()) {
-				try {
-					statement.execute("INSERT INTO blackboard_attribute_types (attribute_type_id, type_name, display_name, value_type) VALUES (" + type.getTypeID() + ", '" + type.getLabel() + "', '" + type.getDisplayName() + "', '" + type.getValueType().getType() + "')"); //NON-NLS
-				} catch (SQLException ex) {
-					try (ResultSet resultSet = connection.executeQuery(statement, "SELECT COUNT(*) AS count FROM blackboard_attribute_types WHERE attribute_type_id = '" + type.getTypeID() + "'")) { //NON-NLS
-						resultSet.next();
-						if (resultSet.getLong("count") == 0) {
-							throw ex;
-						}
-					}
-				}
-				this.typeIdToAttributeTypeMap.put(type.getTypeID(), new BlackboardAttribute.Type(type));
-				this.typeNameToAttributeTypeMap.put(type.getLabel(), new BlackboardAttribute.Type(type));
-			}
-			if (this.dbType == DbType.POSTGRESQL) {
-				int newPrimaryKeyIndex = Collections.max(Arrays.asList(ATTRIBUTE_TYPE.values())).getTypeID() + 1;
-				statement.execute("ALTER SEQUENCE blackboard_attribute_types_attribute_type_id_seq RESTART WITH " + newPrimaryKeyIndex); //NON-NLS
-			}
-		} finally {
-			releaseSingleUserCaseWriteLock();
-		}
-	}
-
-	/**
-	 * Initialize the next artifact id. If there are entries in the
+	 * Initializes the next artifact id. If there are entries in the
 	 * blackboard_artifacts table we will use max(artifact_id) + 1 otherwise we
 	 * will initialize the value to 0x8000000000000000 (the maximum negative
 	 * signed long).
 	 *
-	 * @throws SQLException
-	 * @throws TskCoreException
+	 * @throws SQLException Thrown if there is an error querying the
+	 *                      blackboard_artifacts table.
 	 */
-	private void initNextArtifactId() throws SQLException, TskCoreException {
-		CaseDbConnection connection = null;
-		Statement statement = null;
-		ResultSet resultSet = null;
+	private void initNextArtifactId(CaseDbConnection connection) throws SQLException {
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			statement = connection.createStatement();
-			resultSet = connection.executeQuery(statement, "SELECT MAX(artifact_id) AS max_artifact_id FROM blackboard_artifacts"); //NON-NLS
+		try (Statement statement = connection.createStatement()) {
+			ResultSet resultSet = connection.executeQuery(statement, "SELECT MAX(artifact_id) AS max_artifact_id FROM blackboard_artifacts"); //NON-NLS
 			resultSet.next();
-			this.nextArtifactId = resultSet.getLong("max_artifact_id") + 1;
-			if (this.nextArtifactId == 1) {
-				this.nextArtifactId = BASE_ARTIFACT_ID;
+			nextArtifactId = resultSet.getLong("max_artifact_id") + 1;
+			if (nextArtifactId == 1) {
+				nextArtifactId = BASE_ARTIFACT_ID;
 			}
 		} finally {
-			closeResultSet(resultSet);
-			closeStatement(statement);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -716,7 +648,11 @@ private void initIngestModuleTypes(CaseDbConnection connection) throws SQLExcept
 			statement = connection.createStatement();
 			for (IngestModuleType type : IngestModuleType.values()) {
 				try {
-					statement.execute("INSERT INTO ingest_module_types (type_id, type_name) VALUES (" + type.ordinal() + ", '" + type.toString() + "');"); //NON-NLS
+					String query = "INSERT INTO ingest_module_types (type_id, type_name) VALUES (" + type.ordinal() + ", '" + type.toString() + "')"; // NON-NLS
+					if (getDatabaseType().equals(DbType.POSTGRESQL)) {
+						query += " ON CONFLICT ON CONSTRAINT ingest_module_types_pkey DO NOTHING"; // NON-NLS
+					}
+					statement.execute(query);
 				} catch (SQLException ex) {
 					resultSet = connection.executeQuery(statement, "SELECT COUNT(*) as count FROM ingest_module_types WHERE type_id = " + type.ordinal() + ";"); //NON-NLS
 					resultSet.next();
@@ -749,7 +685,11 @@ private void initIngestStatusTypes(CaseDbConnection connection) throws SQLExcept
 			statement = connection.createStatement();
 			for (IngestJobStatusType type : IngestJobStatusType.values()) {
 				try {
-					statement.execute("INSERT INTO ingest_job_status_types (type_id, type_name) VALUES (" + type.ordinal() + ", '" + type.toString() + "');"); //NON-NLS
+					String query = "INSERT INTO ingest_job_status_types (type_id, type_name) VALUES (" + type.ordinal() + ", '" + type.toString() + "')"; // NON-NLS
+					if (getDatabaseType().equals(DbType.POSTGRESQL)) {
+						query += " ON CONFLICT ON CONSTRAINT ingest_job_status_types_pkey DO NOTHING"; // NON-NLS
+					}
+					statement.execute(query);
 				} catch (SQLException ex) {
 					resultSet = connection.executeQuery(statement, "SELECT COUNT(*) as count FROM ingest_job_status_types WHERE type_id = " + type.ordinal() + ";"); //NON-NLS
 					resultSet.next();
@@ -781,8 +721,12 @@ private void initReviewStatuses(CaseDbConnection connection) throws SQLException
 			statement = connection.createStatement();
 			for (BlackboardArtifact.ReviewStatus status : BlackboardArtifact.ReviewStatus.values()) {
 				try {
-					statement.execute("INSERT INTO review_statuses (review_status_id, review_status_name, display_name) " //NON-NLS
-							+ "VALUES (" + status.getID() + ",'" + status.getName() + "','" + status.getDisplayName() + "')"); //NON-NLS
+					String query = "INSERT INTO review_statuses (review_status_id, review_status_name, display_name) " //NON-NLS
+							+ "VALUES (" + status.getID() + ",'" + status.getName() + "','" + status.getDisplayName() + "')";
+					if (getDatabaseType().equals(DbType.POSTGRESQL)) {
+						query += " ON CONFLICT ON CONSTRAINT review_statuses_pkey DO NOTHING"; // NON-NLS
+					}
+					statement.execute(query);
 				} catch (SQLException ex) {
 					resultSet = connection.executeQuery(statement, "SELECT COUNT(*) as count FROM review_statuses WHERE review_status_id = " + status.getID()); //NON-NLS
 					resultSet.next();
@@ -815,7 +759,11 @@ private void initEncodingTypes(CaseDbConnection connection) throws SQLException,
 			statement = connection.createStatement();
 			for (TskData.EncodingType type : TskData.EncodingType.values()) {
 				try {
-					statement.execute("INSERT INTO file_encoding_types (encoding_type, name) VALUES (" + type.getType() + " , '" + type.name() + "')"); //NON-NLS
+					String query = "INSERT INTO file_encoding_types (encoding_type, name) VALUES (" + type.getType() + " , '" + type.name() + "')"; // NON-NLS
+					if (getDatabaseType().equals(DbType.POSTGRESQL)) {
+						query += " ON CONFLICT ON CONSTRAINT file_encoding_types_pkey DO NOTHING"; // NON-NLS
+					}
+					statement.execute(query);
 				} catch (SQLException ex) {
 					resultSet = connection.executeQuery(statement, "SELECT COUNT(*) as count FROM file_encoding_types WHERE encoding_type = " + type.getType()); //NON-NLS
 					resultSet.next();
@@ -833,6 +781,43 @@ private void initEncodingTypes(CaseDbConnection connection) throws SQLException,
 		}
 	}
 
+	/**
+	 * Put the collected status types into the table. This must be called after the
+	 * database upgrades or the file_collection_status_types table will not exist.
+	 *
+	 * @throws SQLException
+	 * @throws TskCoreException
+	 */
+	private void initCollectedStatusTypes(CaseDbConnection connection) throws SQLException, TskCoreException {
+		Statement statement = null;
+		ResultSet resultSet = null;
+		acquireSingleUserCaseWriteLock();
+		try {
+			statement = connection.createStatement();
+			for (TskData.CollectedStatus type : TskData.CollectedStatus.values()) {
+				try {
+					String query = "INSERT INTO file_collection_status_types (collection_status_type, name) VALUES (" + type.getType() + " , '" + type.name() + "')"; // NON-NLS
+					if (getDatabaseType().equals(DbType.POSTGRESQL)) {
+						query += " ON CONFLICT ON CONSTRAINT file_collection_status_types_pkey DO NOTHING"; // NON-NLS
+					}
+					statement.execute(query);
+				} catch (SQLException ex) {
+					resultSet = connection.executeQuery(statement, "SELECT COUNT(*) as count FROM file_collection_status_types WHERE collection_status_type = " + type.getType()); //NON-NLS
+					resultSet.next();
+					if (resultSet.getLong("count") == 0) {
+						throw ex;
+					}
+					resultSet.close();
+					resultSet = null;
+				}
+			}
+		} finally {
+			closeResultSet(resultSet);
+			closeStatement(statement);
+			releaseSingleUserCaseWriteLock();
+		}
+	}
+
 	/**
 	 * Records the current examiner name in the tsk_examiners table
 	 *
@@ -1006,6 +991,11 @@ private void updateDatabaseSchema(String dbPath) throws Exception {
 				dbSchemaVersion = updateFromSchema8dot5toSchema8dot6(dbSchemaVersion, connection);
 				dbSchemaVersion = updateFromSchema8dot6toSchema9dot0(dbSchemaVersion, connection);
 				dbSchemaVersion = updateFromSchema9dot0toSchema9dot1(dbSchemaVersion, connection);
+				dbSchemaVersion = updateFromSchema9dot1toSchema9dot2(dbSchemaVersion, connection);
+				dbSchemaVersion = updateFromSchema9dot2toSchema9dot3(dbSchemaVersion, connection);
+				dbSchemaVersion = updateFromSchema9dot3toSchema9dot4(dbSchemaVersion, connection);
+				
+				
 
 				statement = connection.createStatement();
 				connection.executeUpdate(statement, "UPDATE tsk_db_info SET schema_ver = " + dbSchemaVersion.getMajor() + ", schema_minor_ver = " + dbSchemaVersion.getMinor()); //NON-NLS
@@ -1356,7 +1346,7 @@ private CaseDbSchemaVersionNumber updateFromSchema3toSchema4(CaseDbSchemaVersion
 			while (resultSet.next()) {
 				int attributeTypeId = resultSet.getInt("attribute_type_id");
 				String attributeLabel = resultSet.getString("type_name");
-				if (attributeTypeId < MIN_USER_DEFINED_TYPE_ID) {
+				if (attributeTypeId < Blackboard.MIN_USER_DEFINED_TYPE_ID) {
 					updateStatement.executeUpdate(
 							"UPDATE blackboard_attribute_types " //NON-NLS
 							+ "SET value_type = " + ATTRIBUTE_TYPE.fromLabel(attributeLabel).getValueType().getType() + " " //NON-NLS
@@ -2425,9 +2415,12 @@ private CaseDbSchemaVersionNumber updateFromSchema8dot6toSchema9dot0(CaseDbSchem
 			statement.execute("CREATE TABLE tsk_analysis_results (artifact_obj_id " + bigIntDataType + " PRIMARY KEY, "
 					+ "conclusion TEXT, "
 					+ "significance INTEGER NOT NULL, "
-					/* method_category was a column in a little distributed version of 9.0. 
-					 * It was renamed to priority before public release. The 9.1 upgrade code
-					 * will add the priority column. This is commented out since it was never used. */ 
+					/*
+					 * method_category was a column in a little distributed
+					 * version of 9.0. It was renamed to priority before public
+					 * release. The 9.1 upgrade code will add the priority
+					 * column. This is commented out since it was never used.
+					 */
 					// + "method_category INTEGER NOT NULL, "
 					+ "configuration TEXT, justification TEXT, "
 					+ "ignore_score INTEGER DEFAULT 0 " // boolean	
@@ -2643,7 +2636,6 @@ private CaseDbSchemaVersionNumber updateFromSchema9dot0toSchema9dot1(CaseDbSchem
 								+ "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE, "
 								+ "FOREIGN KEY(data_source_obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE "
 								+ ")");
-						
 
 						// Copy the data
 						statement.execute("INSERT INTO temp_tsk_analysis_results(artifact_obj_id, "
@@ -2653,17 +2645,14 @@ private CaseDbSchemaVersionNumber updateFromSchema9dot0toSchema9dot1(CaseDbSchem
 								+ "data_source_obj_id, significance) "
 								+ "SELECT obj_id, data_source_obj_id, significance FROM tsk_aggregate_score");
 
-						
-						
 						// Drop the old tables
 						statement.execute("DROP TABLE tsk_analysis_results");
 						statement.execute("DROP TABLE tsk_aggregate_score");
-						
 
 						// Rename the new tables
 						statement.execute("ALTER TABLE temp_tsk_analysis_results RENAME TO tsk_analysis_results");
 						statement.execute("ALTER TABLE temp_tsk_aggregate_score RENAME TO tsk_aggregate_score");
-						
+
 					}
 					break;
 				default:
@@ -2672,12 +2661,12 @@ private CaseDbSchemaVersionNumber updateFromSchema9dot0toSchema9dot1(CaseDbSchem
 
 			// add an index on tsk_file_attributes table.
 			statement.execute("CREATE INDEX tsk_file_attributes_obj_id ON tsk_file_attributes(obj_id)");
-			
+
 			statement.execute("ALTER TABLE tsk_analysis_results ADD COLUMN priority INTEGER NOT NULL DEFAULT " + Score.Priority.NORMAL.getId());
 			statement.execute("ALTER TABLE tsk_aggregate_score ADD COLUMN priority INTEGER NOT NULL DEFAULT " + Score.Priority.NORMAL.getId());
-			
+
 			statement.execute("UPDATE blackboard_artifact_types SET category_type = 1 WHERE artifact_type_id = 16");
-			
+
 			return new CaseDbSchemaVersionNumber(9, 1);
 		} finally {
 			closeResultSet(results);
@@ -2686,6 +2675,122 @@ private CaseDbSchemaVersionNumber updateFromSchema9dot0toSchema9dot1(CaseDbSchem
 		}
 	}
 
+	/**
+	 * Upgrade the database schema from 9.1 to 9.2. This upgrade includes:
+	 * - Modify the UNIQUE constraint on tsk_os_account_instances to include
+	 * the instance_type column.
+	 *
+	 * @param schemaVersion Current schema version - must be 9.1
+	 * @param connection    Database connection to use.
+	 *
+	 * @return New schema version
+	 *
+	 * @throws SQLException
+	 * @throws TskCoreException
+	 */
+	private CaseDbSchemaVersionNumber updateFromSchema9dot1toSchema9dot2(CaseDbSchemaVersionNumber schemaVersion, CaseDbConnection connection) throws SQLException, TskCoreException {
+		if (schemaVersion.getMajor() != 9) {
+			return schemaVersion;
+		}
+
+		if (schemaVersion.getMinor() != 1) {
+			return schemaVersion;
+		}
+
+		Statement updateSchemaStatement = connection.createStatement();
+		ResultSet results = null;
+		acquireSingleUserCaseWriteLock();
+		try {
+
+			String bigIntDataType = "BIGINT";
+			String primaryKeyType = "BIGSERIAL";
+
+			if (this.dbType.equals(DbType.SQLITE)) {
+				bigIntDataType = "INTEGER";
+				primaryKeyType = "INTEGER";
+			}
+
+			// In 9.2 we modified the UNIQUE constraint on tsk_os_account_instances to include instance_type column.
+			// Since SQLite does not allow to drop or alter constraints, we will create a new table, copy the data and delete the old table.
+			// Rename existing table
+			updateSchemaStatement.execute("ALTER TABLE tsk_os_account_instances RENAME TO old_tsk_os_account_instances");
+
+			// New table
+			updateSchemaStatement.execute("CREATE TABLE tsk_os_account_instances (id " + primaryKeyType + " PRIMARY KEY, "
+					+ "os_account_obj_id " + bigIntDataType + " NOT NULL, "
+					+ "data_source_obj_id " + bigIntDataType + " NOT NULL, "
+					+ "instance_type INTEGER NOT NULL, " // PerformedActionOn/ReferencedOn
+					+ "UNIQUE(os_account_obj_id, data_source_obj_id, instance_type), "
+					+ "FOREIGN KEY(os_account_obj_id) REFERENCES tsk_os_accounts(os_account_obj_id) ON DELETE CASCADE, "
+					+ "FOREIGN KEY(data_source_obj_id) REFERENCES tsk_objects(obj_id) ON DELETE CASCADE ) ");
+
+			// Copy the data from old table, order by id preserves the primary key. 
+			updateSchemaStatement.execute("INSERT INTO tsk_os_account_instances(os_account_obj_id, "
+					+ "data_source_obj_id, instance_type) SELECT os_account_obj_id, data_source_obj_id, instance_type FROM old_tsk_os_account_instances ORDER BY id ASC");
+
+			// delete old table
+			updateSchemaStatement.execute("DROP TABLE old_tsk_os_account_instances");
+
+			return new CaseDbSchemaVersionNumber(9, 2);
+		} finally {
+			closeResultSet(results);
+			closeStatement(updateSchemaStatement);
+			releaseSingleUserCaseWriteLock();
+		}
+	}
+
+	private CaseDbSchemaVersionNumber updateFromSchema9dot2toSchema9dot3(CaseDbSchemaVersionNumber schemaVersion, CaseDbConnection connection) throws SQLException, TskCoreException {
+		if (schemaVersion.getMajor() != 9) {
+			return schemaVersion;
+		}
+
+		if (schemaVersion.getMinor() != 2) {
+			return schemaVersion;
+		}
+
+		Statement statement = connection.createStatement();
+		acquireSingleUserCaseWriteLock();
+		try {
+			// add a new column 'sha1' to tsk_files
+			statement.execute("ALTER TABLE tsk_files ADD COLUMN sha1 TEXT");
+
+			
+			return new CaseDbSchemaVersionNumber(9, 3);
+
+		} finally {
+			closeStatement(statement);
+			releaseSingleUserCaseWriteLock();
+		}
+	}
+	
+	private CaseDbSchemaVersionNumber updateFromSchema9dot3toSchema9dot4(CaseDbSchemaVersionNumber schemaVersion, CaseDbConnection connection) throws SQLException, TskCoreException {
+		if (schemaVersion.getMajor() != 9) {
+			return schemaVersion;
+		}
+
+		if (schemaVersion.getMinor() != 3) {
+			return schemaVersion;
+		}
+
+		Statement statement = connection.createStatement();
+		acquireSingleUserCaseWriteLock();
+		try {
+			// Add file_collection_status_types table
+			statement.execute("CREATE TABLE file_collection_status_types (collection_status_type INTEGER PRIMARY KEY, name TEXT NOT NULL);");
+			initCollectedStatusTypes(connection);
+			
+			// add a new column 'collected' to tsk_files
+			statement.execute("ALTER TABLE tsk_files ADD COLUMN collected INTEGER NOT NULL DEFAULT " + 
+					TskData.CollectedStatus.UNKNOWN.getType() + ";");
+
+			return new CaseDbSchemaVersionNumber(9, 4);
+
+		} finally {
+			closeStatement(statement);
+			releaseSingleUserCaseWriteLock();
+		}
+	}
+
 	/**
 	 * Inserts a row for the given account type in account_types table, if one
 	 * doesn't exist.
@@ -2883,9 +2988,24 @@ public void releaseSingleUserCaseReadLock() {
 	 * @throws org.sleuthkit.datamodel.TskCoreException
 	 */
 	public static SleuthkitCase openCase(String dbPath) throws TskCoreException {
+		return openCase(dbPath, null);
+	}
+	
+	/**
+	 * Open an existing case database.
+	 *
+	 * @param dbPath Path to SQLite case database.
+	 * @param provider Custom provider for file content bytes (can be null).
+	 *
+	 * @return Case database object.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException
+	 */
+	@Beta
+	public static SleuthkitCase openCase(String dbPath, ContentStreamProvider provider) throws TskCoreException {
 		try {
 			final SleuthkitJNI.CaseDbHandle caseHandle = SleuthkitJNI.openCaseDb(dbPath);
-			return new SleuthkitCase(dbPath, caseHandle, DbType.SQLITE);
+			return new SleuthkitCase(dbPath, caseHandle, DbType.SQLITE, provider);
 		} catch (TskUnsupportedSchemaVersionException ex) {
 			//don't wrap in new TskCoreException
 			throw ex;
@@ -2906,6 +3026,23 @@ public static SleuthkitCase openCase(String dbPath) throws TskCoreException {
 	 * @throws TskCoreException If there is a problem opening the database.
 	 */
 	public static SleuthkitCase openCase(String databaseName, CaseDbConnectionInfo info, String caseDir) throws TskCoreException {
+		return openCase(databaseName, info, caseDir, null);
+	}
+	
+	/**
+	 * Open an existing multi-user case database.
+	 *
+	 * @param databaseName The name of the database.
+	 * @param info         Connection information for the database.
+	 * @param caseDir      The folder where the case metadata file is stored.
+	 * @param contentProvider Custom provider for file content bytes (can be null).
+	 *
+	 * @return A case database object.
+	 *
+	 * @throws TskCoreException If there is a problem opening the database.
+	 */
+	@Beta
+	public static SleuthkitCase openCase(String databaseName, CaseDbConnectionInfo info, String caseDir, ContentStreamProvider contentProvider) throws TskCoreException {
 		try {
 			/*
 			 * The flow of this method involves trying to open case and if
@@ -2920,7 +3057,7 @@ public static SleuthkitCase openCase(String databaseName, CaseDbConnectionInfo i
 			 * are able, but do not lose any information if unable.
 			 */
 			final SleuthkitJNI.CaseDbHandle caseHandle = SleuthkitJNI.openCaseDb(databaseName, info);
-			return new SleuthkitCase(info.getHost(), Integer.parseInt(info.getPort()), databaseName, info.getUserName(), info.getPassword(), caseHandle, caseDir, info.getDbType());
+			return new SleuthkitCase(info.getHost(), Integer.parseInt(info.getPort()), databaseName, info.getUserName(), info.getPassword(), caseHandle, caseDir, info.getDbType(), contentProvider);
 		} catch (PropertyVetoException exp) {
 			// In this case, the JDBC driver doesn't support PostgreSQL. Use the generic message here.
 			throw new TskCoreException(exp.getMessage(), exp);
@@ -2943,12 +3080,27 @@ public static SleuthkitCase openCase(String databaseName, CaseDbConnectionInfo i
 	 * @throws org.sleuthkit.datamodel.TskCoreException
 	 */
 	public static SleuthkitCase newCase(String dbPath) throws TskCoreException {
+		return newCase(dbPath, null);
+	}
+	
+	/**
+	 * Creates a new SQLite case database.
+	 *
+	 * @param dbPath Path to where SQLite case database should be created.
+	 * @param contentProvider Custom provider for file bytes (can be null).
+	 *
+	 * @return A case database object.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException
+	 */
+	@Beta
+	public static SleuthkitCase newCase(String dbPath, ContentStreamProvider contentProvider) throws TskCoreException {
 		try {
 			CaseDatabaseFactory factory = new CaseDatabaseFactory(dbPath);
 			factory.createCaseDatabase();
 
 			SleuthkitJNI.CaseDbHandle caseHandle = SleuthkitJNI.openCaseDb(dbPath);
-			return new SleuthkitCase(dbPath, caseHandle, DbType.SQLITE);
+			return new SleuthkitCase(dbPath, caseHandle, DbType.SQLITE, contentProvider);
 		} catch (Exception ex) {
 			throw new TskCoreException("Failed to create case database at " + dbPath, ex);
 		}
@@ -2970,6 +3122,28 @@ public static SleuthkitCase newCase(String dbPath) throws TskCoreException {
 	 * @throws org.sleuthkit.datamodel.TskCoreException
 	 */
 	public static SleuthkitCase newCase(String caseName, CaseDbConnectionInfo info, String caseDirPath) throws TskCoreException {
+		return newCase(caseName, info, caseDirPath, null);
+	}
+	
+	
+	/**
+	 * Creates a new PostgreSQL case database.
+	 *
+	 * @param caseName    The name of the case. It will be used to create a case
+	 *                    database name that can be safely used in SQL commands
+	 *                    and will not be subject to name collisions on the case
+	 *                    database server. Use getDatabaseName to get the
+	 *                    created name.
+	 * @param info        The information to connect to the database.
+	 * @param caseDirPath The case directory path.
+	 * @param contentProvider Custom provider for file bytes (can be null).
+	 *
+	 * @return A case database object.
+	 *
+	 * @throws org.sleuthkit.datamodel.TskCoreException
+	 */
+	@Beta
+	public static SleuthkitCase newCase(String caseName, CaseDbConnectionInfo info, String caseDirPath, ContentStreamProvider contentProvider) throws TskCoreException {
 		String databaseName = createCaseDataBaseName(caseName);
 		try {
 			/**
@@ -2989,7 +3163,7 @@ public static SleuthkitCase newCase(String caseName, CaseDbConnectionInfo info,
 
 			final SleuthkitJNI.CaseDbHandle caseHandle = SleuthkitJNI.openCaseDb(databaseName, info);
 			return new SleuthkitCase(info.getHost(), Integer.parseInt(info.getPort()),
-					databaseName, info.getUserName(), info.getPassword(), caseHandle, caseDirPath, info.getDbType());
+					databaseName, info.getUserName(), info.getPassword(), caseHandle, caseDirPath, info.getDbType(), contentProvider);
 		} catch (PropertyVetoException exp) {
 			// In this case, the JDBC driver doesn't support PostgreSQL. Use the generic message here.
 			throw new TskCoreException(exp.getMessage(), exp);
@@ -3062,9 +3236,19 @@ private static String createCaseDataBaseName(String candidateDbName) {
 
 		return dbName;
 	}
-
+	
 	/**
-	 * Returns the Examiner object for currently logged in user
+	 * Disable the creation of timeline events for new files.
+	 * 
+	 * This setting is not saved to the case database.
+	 */
+	@Beta
+	public void disableTimelineEventCreation() {
+		timelineEventsDisabled.set(true);
+	}
+
+	/**
+	 * Returns the Examiner object for currently logged in user
 	 *
 	 * @return A Examiner object.
 	 *
@@ -3335,7 +3519,7 @@ public List<DataSource> getDataSources() throws TskCoreException {
 					final short metaFlags = (short) (TSK_FS_META_FLAG_ENUM.ALLOC.getValue()
 							| TSK_FS_META_FLAG_ENUM.USED.getValue());
 					String parentPath = "/"; //NON-NLS
-					dataSource = new LocalFilesDataSource(this, objectId, objectId, deviceId, dsName, dirType, metaType, dirFlag, metaFlags, timezone, null, null, FileKnown.UNKNOWN, parentPath);
+					dataSource = new LocalFilesDataSource(this, objectId, objectId, deviceId, dsName, dirType, metaType, dirFlag, metaFlags, timezone, null, null, null, FileKnown.UNKNOWN, parentPath);
 				} else {
 					/*
 					 * Data found in 'tsk_image_info', so we build an Image.
@@ -3435,7 +3619,7 @@ public DataSource getDataSource(long objectId) throws TskDataException, TskCoreE
 					final short metaFlags = (short) (TSK_FS_META_FLAG_ENUM.ALLOC.getValue()
 							| TSK_FS_META_FLAG_ENUM.USED.getValue());
 					String parentPath = "/"; //NON-NLS
-					dataSource = new LocalFilesDataSource(this, objectId, objectId, deviceId, dsName, dirType, metaType, dirFlag, metaFlags, timezone, null, null, FileKnown.UNKNOWN, parentPath);
+					dataSource = new LocalFilesDataSource(this, objectId, objectId, deviceId, dsName, dirType, metaType, dirFlag, metaFlags, timezone, null, null, null, FileKnown.UNKNOWN, parentPath);
 				} else {
 					/*
 					 * Data found in 'tsk_image_info', so we build an Image.
@@ -3486,9 +3670,14 @@ public DataSource getDataSource(long objectId) throws TskDataException, TskCoreE
 	 * @return list of blackboard artifacts.
 	 *
 	 * @throws TskCoreException
+	 *
+	 * @deprecated Use Blackboard.getArtifacts with the desired type(s) and data source(s) as arguments instead.
 	 */
+	@Deprecated
 	public ArrayList<BlackboardArtifact> getBlackboardArtifacts(int artifactTypeID) throws TskCoreException {
-		return getArtifactsHelper("blackboard_artifacts.artifact_type_id = " + artifactTypeID);
+		ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+		artifacts.addAll(blackboard.getArtifactsByType(blackboard.getArtifactType(artifactTypeID)));
+		return artifacts;
 	}
 
 	/**
@@ -3507,7 +3696,7 @@ public long getBlackboardArtifactsCount(long objId) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM blackboard_artifacts WHERE obj_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_ARTIFACTS_FROM_SOURCE);
 			statement.clearParameters();
@@ -3543,7 +3732,7 @@ public long getBlackboardArtifactsTypeCount(int artifactTypeID) throws TskCoreEx
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM blackboard_artifacts WHERE artifact_type_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_ARTIFACTS_OF_TYPE);
 			statement.clearParameters();
@@ -3580,7 +3769,7 @@ public long getBlackboardArtifactsTypeCount(int artifactTypeID, long dataSourceI
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM blackboard_artifacts WHERE artifact_type_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_ARTIFACTS_OF_TYPE_BY_DATA_SOURCE);
 			statement.clearParameters();
@@ -3614,39 +3803,47 @@ public long getBlackboardArtifactsTypeCount(int artifactTypeID, long dataSourceI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 *
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, String value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ "arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ "types.type_name AS type_name, types.display_name AS display_name, "//NON-NLS
-					+ " arts.review_status_id AS review_status_id " //NON-NLS
-					+ "FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND attrs.value_text = '" + value + "'"
-					+ " AND types.artifact_type_id=arts.artifact_type_id"
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());	 //NON-NLS
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ "arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+						+ "types.type_name AS type_name, types.display_name AS display_name, "//NON-NLS
+						+ " arts.review_status_id AS review_status_id " //NON-NLS
+						+ "FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND attrs.value_text = '" + value + "'"
+						+ " AND types.artifact_type_id=arts.artifact_type_id"
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {	 //NON-NLS
+
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute", ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3667,43 +3864,50 @@ public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, String subString, boolean startsWith) throws TskCoreException {
 		String valSubStr = "%" + subString; //NON-NLS
 		if (startsWith == false) {
 			valSubStr += "%"; //NON-NLS
 		}
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
+
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, " //NON-NLS
-					+ " types.type_name AS type_name, types.display_name AS display_name, " //NON-NLS
-					+ " arts.review_status_id AS review_status_id " //NON-NLS
-					+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND LOWER(attrs.value_text) LIKE LOWER('" + valSubStr + "')"
-					+ " AND types.artifact_type_id=arts.artifact_type_id "
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, " //NON-NLS
+						+ " types.type_name AS type_name, types.display_name AS display_name, " //NON-NLS
+						+ " arts.review_status_id AS review_status_id " //NON-NLS
+						+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND LOWER(attrs.value_text) LIKE LOWER('" + valSubStr + "')"
+						+ " AND types.artifact_type_id=arts.artifact_type_id "
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute. " + ex.getMessage(), ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3721,39 +3925,45 @@ public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, int value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ " types.type_name AS type_name, types.display_name AS display_name, "
-					+ " arts.review_status_id AS review_status_id  "//NON-NLS
-					+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND attrs.value_int32 = " + value //NON-NLS
-					+ " AND types.artifact_type_id=arts.artifact_type_id "
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+						+ " types.type_name AS type_name, types.display_name AS display_name, "
+						+ " arts.review_status_id AS review_status_id  "//NON-NLS
+						+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND attrs.value_int32 = " + value //NON-NLS
+						+ " AND types.artifact_type_id=arts.artifact_type_id "
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute", ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3771,39 +3981,46 @@ public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 *
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, long value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ " types.type_name AS type_name, types.display_name AS display_name, "
-					+ " arts.review_status_id AS review_status_id "//NON-NLS
-					+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND attrs.value_int64 = " + value //NON-NLS
-					+ " AND types.artifact_type_id=arts.artifact_type_id "
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+						+ " types.type_name AS type_name, types.display_name AS display_name, "
+						+ " arts.review_status_id AS review_status_id "//NON-NLS
+						+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND attrs.value_int64 = " + value //NON-NLS
+						+ " AND types.artifact_type_id=arts.artifact_type_id "
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute. " + ex.getMessage(), ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3821,39 +4038,46 @@ public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 *
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, double value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ " types.type_name AS type_name, types.display_name AS display_name, "
-					+ " arts.review_status_id AS review_status_id "//NON-NLS
-					+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND attrs.value_double = " + value //NON-NLS
-					+ " AND types.artifact_type_id=arts.artifact_type_id "
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+						+ " types.type_name AS type_name, types.display_name AS display_name, "
+						+ " arts.review_status_id AS review_status_id "//NON-NLS
+						+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND attrs.value_double = " + value //NON-NLS
+						+ " AND types.artifact_type_id=arts.artifact_type_id "
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute", ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3871,39 +4095,47 @@ public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRI
 	 * @throws TskCoreException exception thrown if a critical error occurred
 	 *                          within tsk core and artifacts could not be
 	 *                          queried
+	 *
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(BlackboardAttribute.ATTRIBUTE_TYPE attrType, byte value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
+
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ " types.type_name AS type_name, types.display_name AS display_name, "
-					+ " arts.review_status_id AS review_status_id "//NON-NLS
-					+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND attrs.value_byte = " + value //NON-NLS
-					+ " AND types.artifact_type_id=arts.artifact_type_id "
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement();
+				ResultSet resultSet = connection.executeQuery(statement, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+						+ " arts.obj_id AS obj_id, arts.artifact_obj_id AS artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+						+ " types.type_name AS type_name, types.display_name AS display_name, "
+						+ " arts.review_status_id AS review_status_id "//NON-NLS
+						+ " FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
+						+ " WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+						+ " AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
+						+ " AND attrs.value_byte = " + value //NON-NLS
+						+ " AND types.artifact_type_id=arts.artifact_type_id "
+						+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());) {
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				artifacts.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by attribute", ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -3927,7 +4159,7 @@ public Iterable<BlackboardArtifact.Type> getArtifactTypes() throws TskCoreExcept
 			ArrayList<BlackboardArtifact.Type> artifactTypes = new ArrayList<BlackboardArtifact.Type>();
 			while (rs.next()) {
 				artifactTypes.add(new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
-						rs.getString("type_name"), rs.getString("display_name"), 
+						rs.getString("type_name"), rs.getString("display_name"),
 						BlackboardArtifact.Category.fromID(rs.getInt("category_type"))));
 			}
 			return artifactTypes;
@@ -4011,7 +4243,7 @@ public List<BlackboardArtifact.Type> getArtifactTypesInUse() throws TskCoreExcep
 			List<BlackboardArtifact.Type> uniqueArtifactTypes = new ArrayList<BlackboardArtifact.Type>();
 			while (rs.next()) {
 				uniqueArtifactTypes.add(new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
-						rs.getString("type_name"), rs.getString("display_name"), 
+						rs.getString("type_name"), rs.getString("display_name"),
 						BlackboardArtifact.Category.fromID(rs.getInt("category_type"))));
 			}
 			return uniqueArtifactTypes;
@@ -4092,57 +4324,6 @@ public int getBlackboardAttributeTypesCount() throws TskCoreException {
 		}
 	}
 
-	/**
-	 * Gets unrejected blackboard artifacts that match a given WHERE clause.
-	 * Uses a SELECT	* statement that does a join of the blackboard_artifacts
-	 * and blackboard_artifact_types tables to get all of the required data.
-	 *
-	 * @param whereClause The WHERE clause to append to the SELECT statement.
-	 *
-	 * @return A list of BlackboardArtifact objects.
-	 *
-	 * @throws TskCoreException If there is a problem querying the case
-	 *                          database.
-	 */
-	ArrayList<BlackboardArtifact> getArtifactsHelper(String whereClause) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement statement = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			statement = connection.createStatement();
-			String query = "SELECT blackboard_artifacts.artifact_id AS artifact_id, "
-					+ "blackboard_artifacts.obj_id AS obj_id, "
-					+ "blackboard_artifacts.artifact_obj_id AS artifact_obj_id, "
-					+ "blackboard_artifacts.data_source_obj_id AS data_source_obj_id, "
-					+ "blackboard_artifact_types.artifact_type_id AS artifact_type_id, "
-					+ "blackboard_artifact_types.type_name AS type_name, "
-					+ "blackboard_artifact_types.display_name AS display_name, "
-					+ "blackboard_artifacts.review_status_id AS review_status_id "
-					+ "FROM blackboard_artifacts, blackboard_artifact_types "
-					+ "WHERE blackboard_artifacts.artifact_type_id = blackboard_artifact_types.artifact_type_id "
-					+ " AND blackboard_artifacts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID()
-					+ " AND " + whereClause;
-			rs = connection.executeQuery(statement, query);
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
-			}
-			return artifacts;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting or creating a blackboard artifact", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(statement);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
-	}
-
 	/**
 	 * Helper method to get count of all artifacts matching the type id and
 	 * object id. Does not included rejected artifacts.
@@ -4161,7 +4342,7 @@ private long getArtifactsCountHelper(int artifactTypeID, long obj_id) throws Tsk
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM blackboard_artifacts WHERE obj_id = ? AND artifact_type_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_ARTIFACTS_BY_SOURCE_AND_TYPE);
 			statement.clearParameters();
@@ -4183,8 +4364,8 @@ private long getArtifactsCountHelper(int artifactTypeID, long obj_id) throws Tsk
 	}
 
 	/**
-	 * Get all blackboard artifacts of a given type for the given object id.
-	 * Does	not included rejected artifacts.
+	 * Get all blackboard artifacts of a given type for the given source (object
+	 * id). Does not include rejected artifacts.
 	 *
 	 * @param artifactTypeName artifact type name
 	 * @param obj_id           object id
@@ -4195,7 +4376,9 @@ private long getArtifactsCountHelper(int artifactTypeID, long obj_id) throws Tsk
 	 *                          within TSK core
 	 */
 	public ArrayList<BlackboardArtifact> getBlackboardArtifacts(String artifactTypeName, long obj_id) throws TskCoreException {
-		return getArtifactsHelper("blackboard_artifacts.obj_id = " + obj_id + " AND blackboard_artifact_types.type_name = '" + artifactTypeName + "';");
+		ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+		artifacts.addAll(blackboard.getArtifactsBySourceId(getArtifactType(artifactTypeName), obj_id));
+		return artifacts;
 	}
 
 	/**
@@ -4211,7 +4394,9 @@ public ArrayList<BlackboardArtifact> getBlackboardArtifacts(String artifactTypeN
 	 *                          within TSK core
 	 */
 	public ArrayList<BlackboardArtifact> getBlackboardArtifacts(int artifactTypeID, long obj_id) throws TskCoreException {
-		return getArtifactsHelper("blackboard_artifacts.obj_id = " + obj_id + " AND blackboard_artifact_types.artifact_type_id = " + artifactTypeID + ";");
+		ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+		artifacts.addAll(blackboard.getArtifactsBySourceId(blackboard.getArtifactType(artifactTypeID), obj_id));
+		return artifacts;
 	}
 
 	/**
@@ -4294,7 +4479,9 @@ public long getBlackboardArtifactsCount(ARTIFACT_TYPE artifactType, long obj_id)
 	 *                          within TSK core
 	 */
 	public ArrayList<BlackboardArtifact> getBlackboardArtifacts(String artifactTypeName) throws TskCoreException {
-		return getArtifactsHelper("blackboard_artifact_types.type_name = '" + artifactTypeName + "';");
+		ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+		artifacts.addAll(blackboard.getArtifactsByType(getArtifactType(artifactTypeName)));
+		return artifacts;
 	}
 
 	/**
@@ -4309,7 +4496,9 @@ public ArrayList<BlackboardArtifact> getBlackboardArtifacts(String artifactTypeN
 	 *                          within TSK core
 	 */
 	public ArrayList<BlackboardArtifact> getBlackboardArtifacts(ARTIFACT_TYPE artifactType) throws TskCoreException {
-		return getArtifactsHelper("blackboard_artifact_types.artifact_type_id = " + artifactType.getTypeID() + ";");
+		ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
+		artifacts.addAll(blackboard.getArtifactsByType(blackboard.getArtifactType(artifactType.getTypeID())));
+		return artifacts;
 	}
 
 	/**
@@ -4324,40 +4513,69 @@ public ArrayList<BlackboardArtifact> getBlackboardArtifacts(ARTIFACT_TYPE artifa
 	 *
 	 * @throws TskCoreException exception thrown if a critical error occurs
 	 *                          within TSK core
+	 *
+	 * @deprecated Do not use.
 	 */
+	@Deprecated
 	public List<BlackboardArtifact> getBlackboardArtifacts(ARTIFACT_TYPE artifactType, BlackboardAttribute.ATTRIBUTE_TYPE attrType, String value) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
+
+		String dataArtifactJoin = "tsk_data_artifacts AS datarts ON datarts.artifact_obj_id = arts.artifact_obj_id";
+		String analysisResultJoin = "tsk_analysis_results AS anresult ON anresult.artifact_obj_id = arts.artifact_obj_id";
+		String dataArtifactColumns = ", datarts.os_account_obj_id AS os_account_obj_id";
+		String analysResultColumns = ", anresult.conclusion AS conclusion, anresult.significance AS significance, anresult.priority AS priority, anresult.configuration AS configuration, anresult.justification AS justification ";
+
+		String formatQuery = "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
+				+ "arts.obj_id AS obj_id, arts.artifact_obj_id as artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
+				+ "types.type_name AS type_name, types.display_name AS display_name,"
+				+ "arts.review_status_id AS review_status_id %s "//NON-NLS
+				+ "FROM blackboard_artifacts AS arts "
+				+ "JOIN blackboard_attributes AS attrs ON arts.artifact_id = attrs.artifact_id "
+				+ "JOIN blackboard_artifact_types AS types ON types.artifact_type_id = arts.artifact_type_id " //NON-NLS
+				+ "LEFT JOIN %s "
+				+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
+				+ "AND attrs.attribute_type_id = %d "
+				+ " AND arts.artifact_type_id = %d "
+				+ " AND attrs.value_text = '%s' " //NON-NLS
+				+ " AND types.artifact_type_id=arts.artifact_type_id "
+				+ " AND arts.review_status_id != %d";
+
+		String query = String.format(formatQuery,
+				(artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT ? analysResultColumns : dataArtifactColumns),
+				(artifactType.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT ? analysisResultJoin : dataArtifactJoin),
+				attrType.getTypeID(),
+				artifactType.getTypeID(),
+				value,
+				BlackboardArtifact.ReviewStatus.REJECTED.getID());
+
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT DISTINCT arts.artifact_id AS artifact_id, " //NON-NLS
-					+ "arts.obj_id AS obj_id, arts.artifact_obj_id as artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ "types.type_name AS type_name, types.display_name AS display_name,"
-					+ "arts.review_status_id AS review_status_id "//NON-NLS
-					+ "FROM blackboard_artifacts AS arts, blackboard_attributes AS attrs, blackboard_artifact_types AS types " //NON-NLS
-					+ "WHERE arts.artifact_id = attrs.artifact_id " //NON-NLS
-					+ "AND attrs.attribute_type_id = " + attrType.getTypeID() //NON-NLS
-					+ " AND arts.artifact_type_id = " + artifactType.getTypeID() //NON-NLS
-					+ " AND attrs.value_text = '" + value + "'" //NON-NLS
-					+ " AND types.artifact_type_id=arts.artifact_type_id"
-					+ " AND arts.review_status_id !=" + BlackboardArtifact.ReviewStatus.REJECTED.getID());
-			ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
+		try (CaseDbConnection connection = connections.getConnection(); Statement s = connection.createStatement(); ResultSet rs = connection.executeQuery(s, query)) {
+			ArrayList<BlackboardArtifact> artifacts = new ArrayList<>();
 			while (rs.next()) {
-				artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
+				if (artifactType.getCategory() == BlackboardArtifact.Category.DATA_ARTIFACT) {
+					Long osAccountObjId = rs.getLong("os_account_obj_id");
+					if (rs.wasNull()) {
+						osAccountObjId = null;
+					}
+
+					artifacts.add(new DataArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"),
+							rs.getLong("artifact_obj_id"),
+							rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
+							rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
+							BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")), osAccountObjId, false));
+				} else {
+					artifacts.add(new AnalysisResult(this, rs.getLong("artifact_id"), rs.getLong("obj_id"),
+							rs.getLong("artifact_obj_id"),
+							rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
+							rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
+							BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")),
+							new Score(Score.Significance.fromID(rs.getInt("significance")), Score.Priority.fromID(rs.getInt("priority"))),
+							rs.getString("conclusion"), rs.getString("configuration"), rs.getString("justification")));
+				}
 			}
 			return artifacts;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting blackboard artifacts by artifact type and attribute. " + ex.getMessage(), ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -4374,40 +4592,17 @@ public List<BlackboardArtifact> getBlackboardArtifacts(ARTIFACT_TYPE artifactTyp
 	 *                          within TSK core
 	 */
 	public BlackboardArtifact getBlackboardArtifact(long artifactID) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT arts.artifact_id AS artifact_id, "
-					+ "arts.obj_id AS obj_id, arts.artifact_obj_id as artifact_obj_id, arts.data_source_obj_id AS data_source_obj_id, arts.artifact_type_id AS artifact_type_id, "
-					+ "types.type_name AS type_name, types.display_name AS display_name,"
-					+ "arts.review_status_id AS review_status_id "//NON-NLS
-					+ "FROM blackboard_artifacts AS arts, blackboard_artifact_types AS types "
-					+ "WHERE arts.artifact_id = " + artifactID
-					+ " AND arts.artifact_type_id = types.artifact_type_id");
-			if (rs.next()) {
-				return new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						rs.getInt("artifact_type_id"), rs.getString("type_name"), rs.getString("display_name"),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")));
-			} else {
-				/*
-				 * I think this should actually return null (or Optional) when
-				 * there is no artifact with the given id, but it looks like
-				 * existing code is not expecting that. -jm
-				 */
-				throw new TskCoreException("No blackboard artifact with id " + artifactID);
-			}
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting a blackboard artifact. " + ex.getMessage(), ex);
-		} finally {
-			closeResultSet(rs);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
+		List<DataArtifact> dataArtifacts = blackboard.getDataArtifactsWhere("artifacts.artifact_id = " + artifactID);
+		if (!dataArtifacts.isEmpty()) {
+			return dataArtifacts.get(0);
 		}
+
+		List<AnalysisResult> analysisResults = blackboard.getAnalysisResultsWhere("artifacts.artifact_id = " + artifactID);
+		if (!analysisResults.isEmpty()) {
+			return analysisResults.get(0);
+		}
+
+		throw new TskCoreException("No blackboard artifact with id " + artifactID);
 	}
 
 	/**
@@ -4685,49 +4880,15 @@ String addSourceToArtifactAttribute(BlackboardAttribute attr, String source) thr
 	 *
 	 * @throws TskCoreException exception thrown if a critical error occurs
 	 *                          within tsk core
-	 * @throws TskDataException exception thrown if attribute type was already
-	 *                          in the system
+	 *
+	 * @deprecated Use Blackboard.getOrAddAttributeType() instead.
 	 */
+	@Deprecated
 	public BlackboardAttribute.Type addArtifactAttributeType(String attrTypeString, TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE valueType, String displayName) throws TskCoreException, TskDataException {
-		CaseDbConnection connection = null;
-		acquireSingleUserCaseWriteLock();
-		Statement s = null;
-		ResultSet rs = null;
 		try {
-			connection = connections.getConnection();
-			connection.beginTransaction();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT attribute_type_id FROM blackboard_attribute_types WHERE type_name = '" + attrTypeString + "'"); //NON-NLS
-			if (!rs.next()) {
-				rs.close();
-				rs = connection.executeQuery(s, "SELECT MAX(attribute_type_id) AS highest_id FROM blackboard_attribute_types");
-				int maxID = 0;
-				if (rs.next()) {
-					maxID = rs.getInt("highest_id");
-					if (maxID < MIN_USER_DEFINED_TYPE_ID) {
-						maxID = MIN_USER_DEFINED_TYPE_ID;
-					} else {
-						maxID++;
-					}
-				}
-				connection.executeUpdate(s, "INSERT INTO blackboard_attribute_types (attribute_type_id, type_name, display_name, value_type) VALUES ('" + maxID + "', '" + attrTypeString + "', '" + displayName + "', '" + valueType.getType() + "')"); //NON-NLS
-				BlackboardAttribute.Type type = new BlackboardAttribute.Type(maxID, attrTypeString, displayName, valueType);
-				this.typeIdToAttributeTypeMap.put(type.getTypeID(), type);
-				this.typeNameToAttributeTypeMap.put(type.getTypeName(), type);
-				connection.commitTransaction();
-				return type;
-			} else {
-				throw new TskDataException("The attribute type that was added was already within the system.");
-			}
-
-		} catch (SQLException ex) {
-			rollbackTransaction(connection);
-			throw new TskCoreException("Error adding attribute type", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseWriteLock();
+			return blackboard.getOrAddAttributeType(attrTypeString, valueType, displayName);
+		} catch (BlackboardException ex) {
+			throw new TskCoreException("Error adding attribute type: " + attrTypeString, ex);
 		}
 	}
 
@@ -4740,75 +4901,11 @@ public BlackboardAttribute.Type addArtifactAttributeType(String attrTypeString,
 	 *
 	 * @throws TskCoreException If an error occurs accessing the case database.
 	 *
+	 * @deprecated Use Blackboard.getAttributeType() instead.
 	 */
+	@Deprecated
 	public BlackboardAttribute.Type getAttributeType(String attrTypeName) throws TskCoreException {
-		if (this.typeNameToAttributeTypeMap.containsKey(attrTypeName)) {
-			return this.typeNameToAttributeTypeMap.get(attrTypeName);
-		}
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT attribute_type_id, type_name, display_name, value_type FROM blackboard_attribute_types WHERE type_name = '" + attrTypeName + "'"); //NON-NLS
-			BlackboardAttribute.Type type = null;
-			if (rs.next()) {
-				type = new BlackboardAttribute.Type(rs.getInt("attribute_type_id"), rs.getString("type_name"),
-						rs.getString("display_name"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getLong("value_type")));
-				this.typeIdToAttributeTypeMap.put(type.getTypeID(), type);
-				this.typeNameToAttributeTypeMap.put(attrTypeName, type);
-			}
-			return type;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting attribute type id", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
-	}
-
-	/**
-	 * Get the attribute type associated with an attribute type ID.
-	 *
-	 * @param typeID An attribute type ID.
-	 *
-	 * @return An attribute type or null if the attribute type does not exist.
-	 *
-	 * @throws TskCoreException If an error occurs accessing the case database.
-	 *
-	 */
-	BlackboardAttribute.Type getAttributeType(int typeID) throws TskCoreException {
-		if (this.typeIdToAttributeTypeMap.containsKey(typeID)) {
-			return this.typeIdToAttributeTypeMap.get(typeID);
-		}
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT attribute_type_id, type_name, display_name, value_type FROM blackboard_attribute_types WHERE attribute_type_id = " + typeID + ""); //NON-NLS
-			BlackboardAttribute.Type type = null;
-			if (rs.next()) {
-				type = new BlackboardAttribute.Type(rs.getInt("attribute_type_id"), rs.getString("type_name"),
-						rs.getString("display_name"), TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getLong("value_type")));
-				this.typeIdToAttributeTypeMap.put(typeID, type);
-				this.typeNameToAttributeTypeMap.put(type.getTypeName(), type);
-			}
-			return type;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting attribute type id", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
+		return blackboard.getAttributeType(attrTypeName);
 	}
 
 	/**
@@ -4820,80 +4917,11 @@ BlackboardAttribute.Type getAttributeType(int typeID) throws TskCoreException {
 	 *
 	 * @throws TskCoreException If an error occurs accessing the case database.
 	 *
+	 * @deprecated Use Blackboard.getArtifactType() instead.
 	 */
+	@Deprecated
 	public BlackboardArtifact.Type getArtifactType(String artTypeName) throws TskCoreException {
-		if (this.typeNameToArtifactTypeMap.containsKey(artTypeName)) {
-			return this.typeNameToArtifactTypeMap.get(artTypeName);
-		}
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT artifact_type_id, type_name, display_name, category_type FROM blackboard_artifact_types WHERE type_name = '" + artTypeName + "'"); //NON-NLS
-			BlackboardArtifact.Type type = null;
-			if (rs.next()) {
-				type = new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
-						rs.getString("type_name"), rs.getString("display_name"), 
-						BlackboardArtifact.Category.fromID(rs.getInt("category_type")));
-				this.typeIdToArtifactTypeMap.put(type.getTypeID(), type);
-				this.typeNameToArtifactTypeMap.put(artTypeName, type);
-			}
-			return type;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting artifact type from the database", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
-	}
-
-	/**
-	 * Get the artifact type associated with an artifact type id.
-	 *
-	 * @param artTypeId An artifact type id.
-	 *
-	 * @return The artifact type.
-	 *
-	 * @throws TskCoreException If an error occurs accessing the case database 
-	 *						    or no value is found.
-	 *
-	 */
-	BlackboardArtifact.Type getArtifactType(int artTypeId) throws TskCoreException {
-		if (this.typeIdToArtifactTypeMap.containsKey(artTypeId)) {
-			return typeIdToArtifactTypeMap.get(artTypeId);
-		}
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();	
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT artifact_type_id, type_name, display_name, category_type FROM blackboard_artifact_types WHERE artifact_type_id = " + artTypeId + ""); //NON-NLS
-			BlackboardArtifact.Type type = null;
-			if (rs.next()) {
-				type = new BlackboardArtifact.Type(rs.getInt("artifact_type_id"),
-						rs.getString("type_name"), rs.getString("display_name"), 
-						BlackboardArtifact.Category.fromID(rs.getInt("category_type")));
-				this.typeIdToArtifactTypeMap.put(artTypeId, type);
-				this.typeNameToArtifactTypeMap.put(type.getTypeName(), type);
-				return type;
-			} else {
-				throw new TskCoreException("No artifact type found matching id: " + artTypeId);
-			}
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting artifact type from the database", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
+		return blackboard.getArtifactType(artTypeName);
 	}
 
 	/**
@@ -4910,9 +4938,10 @@ BlackboardArtifact.Type getArtifactType(int artTypeId) throws TskCoreException {
 	 * @throws TskCoreException exception thrown if a critical error occurs
 	 * @throws TskDataException exception thrown if given data is already in db
 	 *                          within tsk core
+	 * @deprecated Use Blackboard.getOrAddArtifactType() instead.
 	 */
+	@Deprecated
 	public BlackboardArtifact.Type addBlackboardArtifactType(String artifactTypeName, String displayName) throws TskCoreException, TskDataException {
-
 		return addBlackboardArtifactType(artifactTypeName, displayName, BlackboardArtifact.Category.DATA_ARTIFACT);
 	}
 
@@ -4928,173 +4957,35 @@ public BlackboardArtifact.Type addBlackboardArtifactType(String artifactTypeName
 	 * @return Type of the artifact added.
 	 *
 	 * @throws TskCoreException exception thrown if a critical error occurs
-	 * @throws TskDataException exception thrown if given data is already in db
-	 *                          within tsk core
+	 * 
+	 * @deprecated Use Blackboard.getOrAddArtifactType() instead.
 	 */
+	@Deprecated
 	BlackboardArtifact.Type addBlackboardArtifactType(String artifactTypeName, String displayName, BlackboardArtifact.Category category) throws TskCoreException, TskDataException {
-		CaseDbConnection connection = null;
-		acquireSingleUserCaseWriteLock();
-		Statement s = null;
-		ResultSet rs = null;
-		try {
-			connection = connections.getConnection();
-			connection.beginTransaction();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT artifact_type_id FROM blackboard_artifact_types WHERE type_name = '" + artifactTypeName + "'"); //NON-NLS
-			if (!rs.next()) {
-				rs.close();
-				rs = connection.executeQuery(s, "SELECT MAX(artifact_type_id) AS highest_id FROM blackboard_artifact_types");
-				int maxID = 0;
-				if (rs.next()) {
-					maxID = rs.getInt("highest_id");
-					if (maxID < MIN_USER_DEFINED_TYPE_ID) {
-						maxID = MIN_USER_DEFINED_TYPE_ID;
-					} else {
-						maxID++;
-					}
-				}
-				connection.executeUpdate(s, "INSERT INTO blackboard_artifact_types (artifact_type_id, type_name, display_name, category_type) VALUES ('" + maxID + "', '" + artifactTypeName + "', '" + displayName + "', " + category.getID() + " )"); //NON-NLS
-				BlackboardArtifact.Type type = new BlackboardArtifact.Type(maxID, artifactTypeName, displayName, category);
-				this.typeIdToArtifactTypeMap.put(type.getTypeID(), type);
-				this.typeNameToArtifactTypeMap.put(type.getTypeName(), type);
-				connection.commitTransaction();
-				return type;
-			} else {
-				throw new TskDataException("The attribute type that was added was already within the system.");
-			}
-		} catch (SQLException ex) {
-			rollbackTransaction(connection);
-			throw new TskCoreException("Error adding artifact type", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
-			releaseSingleUserCaseWriteLock();
-		}
-	}
-
-	public ArrayList<BlackboardAttribute> getBlackboardAttributes(final BlackboardArtifact artifact) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement statement = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
-			statement = connection.createStatement();
-			rs = connection.executeQuery(statement, "SELECT attrs.artifact_id AS artifact_id, "
-					+ "attrs.source AS source, attrs.context AS context, attrs.attribute_type_id AS attribute_type_id, "
-					+ "attrs.value_type AS value_type, attrs.value_byte AS value_byte, "
-					+ "attrs.value_text AS value_text, attrs.value_int32 AS value_int32, "
-					+ "attrs.value_int64 AS value_int64, attrs.value_double AS value_double, "
-					+ "types.type_name AS type_name, types.display_name AS display_name "
-					+ "FROM blackboard_attributes AS attrs, blackboard_attribute_types AS types WHERE attrs.artifact_id = " + artifact.getArtifactID()
-					+ " AND attrs.attribute_type_id = types.attribute_type_id");
-			ArrayList<BlackboardAttribute> attributes = new ArrayList<BlackboardAttribute>();
-			while (rs.next()) {
-				int attributeTypeId = rs.getInt("attribute_type_id");
-				String attributeTypeName = rs.getString("type_name");
-				BlackboardAttribute.Type attributeType;
-				if (this.typeIdToAttributeTypeMap.containsKey(attributeTypeId)) {
-					attributeType = this.typeIdToAttributeTypeMap.get(attributeTypeId);
-				} else {
-					attributeType = new BlackboardAttribute.Type(attributeTypeId, attributeTypeName,
-							rs.getString("display_name"),
-							BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getInt("value_type")));
-					this.typeIdToAttributeTypeMap.put(attributeTypeId, attributeType);
-					this.typeNameToAttributeTypeMap.put(attributeTypeName, attributeType);
-				}
-
-				final BlackboardAttribute attr = new BlackboardAttribute(
-						rs.getLong("artifact_id"),
-						attributeType,
-						rs.getString("source"),
-						rs.getString("context"),
-						rs.getInt("value_int32"),
-						rs.getLong("value_int64"),
-						rs.getDouble("value_double"),
-						rs.getString("value_text"),
-						rs.getBytes("value_byte"), this
-				);
-				attr.setParentDataSourceID(artifact.getDataSourceObjectID());
-				attributes.add(attr);
-			}
-			return attributes;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting attributes for artifact, artifact id = " + artifact.getArtifactID(), ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(statement);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
+			return blackboard.getOrAddArtifactType(artifactTypeName, displayName, category);
+		} catch (BlackboardException ex) {
+			throw new TskCoreException("Error getting or adding artifact type with name: " + artifactTypeName, ex);
 		}
 	}
-
+	
 	/**
-	 * Get the attributes associated with the given file.
-	 *
-	 * @param file
-	 *
-	 * @return
-	 *
-	 * @throws TskCoreException
+	 * Get the list of attributes for the given artifact.
+	 * 
+	 * @param artifact The artifact to load attributes for.
+	 * 
+	 * @return The list of attributes.
+	 * 
+	 * @throws TskCoreException 
+	 * 
+	 * @deprecated Use Blackboard.getBlackboardAttributes() instead.
 	 */
-	ArrayList<Attribute> getFileAttributes(final AbstractFile file) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement statement = null;
-		ResultSet rs = null;
-		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			statement = connection.createStatement();
-			rs = connection.executeQuery(statement, "SELECT attrs.id as id,  attrs.obj_id AS obj_id, "
-					+ "attrs.attribute_type_id AS attribute_type_id, "
-					+ "attrs.value_type AS value_type, attrs.value_byte AS value_byte, "
-					+ "attrs.value_text AS value_text, attrs.value_int32 AS value_int32, "
-					+ "attrs.value_int64 AS value_int64, attrs.value_double AS value_double, "
-					+ "types.type_name AS type_name, types.display_name AS display_name "
-					+ "FROM tsk_file_attributes AS attrs "
-					+ " INNER JOIN blackboard_attribute_types AS types "
-					+ " ON attrs.attribute_type_id = types.attribute_type_id "
-					+ " WHERE attrs.obj_id = " + file.getId());
-
-			ArrayList<Attribute> attributes = new ArrayList<Attribute>();
-			while (rs.next()) {
-				int attributeTypeId = rs.getInt("attribute_type_id");
-				String attributeTypeName = rs.getString("type_name");
-				BlackboardAttribute.Type attributeType;
-				if (this.typeIdToAttributeTypeMap.containsKey(attributeTypeId)) {
-					attributeType = this.typeIdToAttributeTypeMap.get(attributeTypeId);
-				} else {
-					attributeType = new BlackboardAttribute.Type(attributeTypeId, attributeTypeName,
-							rs.getString("display_name"),
-							BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.fromType(rs.getInt("value_type")));
-					this.typeIdToAttributeTypeMap.put(attributeTypeId, attributeType);
-					this.typeNameToAttributeTypeMap.put(attributeTypeName, attributeType);
-				}
-
-				final Attribute attr = new Attribute(
-						rs.getLong("id"),
-						rs.getLong("obj_id"),
-						attributeType,
-						rs.getInt("value_int32"),
-						rs.getLong("value_int64"),
-						rs.getDouble("value_double"),
-						rs.getString("value_text"),
-						rs.getBytes("value_byte"), this
-				);
-				attributes.add(attr);
-			}
-			return attributes;
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error getting attributes for file, file id = " + file.getId(), ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(statement);
-			closeConnection(connection);
-			releaseSingleUserCaseReadLock();
-		}
+	@Deprecated
+	public ArrayList<BlackboardAttribute> getBlackboardAttributes(final BlackboardArtifact artifact) throws TskCoreException {
+		return blackboard.getBlackboardAttributes(artifact);
 	}
 
+
 	/**
 	 * Get all attributes that match a where clause. The clause should begin
 	 * with "WHERE" or "JOIN". To use this method you must know the database
@@ -5122,11 +5013,11 @@ public ArrayList<BlackboardAttribute> getMatchingAttributes(String whereClause)
 					+ "blackboard_attributes.value_text AS value_text, blackboard_attributes.value_int32 AS value_int32, "
 					+ "blackboard_attributes.value_int64 AS value_int64, blackboard_attributes.value_double AS value_double "
 					+ "FROM blackboard_attributes " + whereClause); //NON-NLS
-			ArrayList<BlackboardAttribute> matches = new ArrayList<BlackboardAttribute>();
+			ArrayList<BlackboardAttribute> matches = new ArrayList<>();
 			while (rs.next()) {
 				BlackboardAttribute.Type type;
 				// attribute type is cached, so this does not necessarily call to the db
-				type = this.getAttributeType(rs.getInt("attribute_type_id"));
+				type = blackboard.getAttributeType(rs.getInt("attribute_type_id"));
 				BlackboardAttribute attr = new BlackboardAttribute(
 						rs.getLong("artifact_id"),
 						type,
@@ -5163,35 +5054,37 @@ public ArrayList<BlackboardAttribute> getMatchingAttributes(String whereClause)
 	 *                          within tsk core \ref query_database_page
 	 */
 	public ArrayList<BlackboardArtifact> getMatchingArtifacts(String whereClause) throws TskCoreException {
-		CaseDbConnection connection = null;
-		Statement s = null;
-		ResultSet rs = null;
+		String query = "SELECT blackboard_artifacts.artifact_id AS artifact_id, "
+				+ "blackboard_artifacts.obj_id AS obj_id, blackboard_artifacts.artifact_obj_id AS artifact_obj_id, blackboard_artifacts.data_source_obj_id AS data_source_obj_id, blackboard_artifacts.artifact_type_id AS artifact_type_id, "
+				+ "blackboard_artifacts.review_status_id AS review_status_id  "
+				+ "FROM blackboard_artifacts " + whereClause;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			s = connection.createStatement();
-			rs = connection.executeQuery(s, "SELECT blackboard_artifacts.artifact_id AS artifact_id, "
-					+ "blackboard_artifacts.obj_id AS obj_id, blackboard_artifacts.artifact_obj_id AS artifact_obj_id, blackboard_artifacts.data_source_obj_id AS data_source_obj_id, blackboard_artifacts.artifact_type_id AS artifact_type_id, "
-					+ "blackboard_artifacts.review_status_id AS review_status_id  "
-					+ "FROM blackboard_artifacts " + whereClause); //NON-NLS
-			ArrayList<BlackboardArtifact> matches = new ArrayList<BlackboardArtifact>();
-			while (rs.next()) {
-				BlackboardArtifact.Type type;
-				// artifact type is cached, so this does not necessarily call to the db
-				type = this.getArtifactType(rs.getInt("artifact_type_id"));
-				BlackboardArtifact artifact = new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"),
-						rs.getObject("data_source_obj_id") != null ? rs.getLong("data_source_obj_id") : null,
-						type.getTypeID(), type.getTypeName(), type.getDisplayName(),
-						BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id")));
-				matches.add(artifact);
+		try (CaseDbConnection connection = connections.getConnection(); Statement statement = connection.createStatement(); ResultSet resultSet = statement.executeQuery(query)) {
+
+			List<Long> analysisArtifactObjIds = new ArrayList<>();
+			List<Long> dataArtifactObjIds = new ArrayList<>();
+			while (resultSet.next()) {
+				BlackboardArtifact.Type type = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+					analysisArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				} else {
+					dataArtifactObjIds.add(resultSet.getLong("artifact_obj_id"));
+				}
+			}
+
+			ArrayList<BlackboardArtifact> matches = new ArrayList<>();
+			if (!analysisArtifactObjIds.isEmpty()) {
+				matches.addAll(getArtifactsForValues(BlackboardArtifact.Category.ANALYSIS_RESULT, "artifacts.artifact_obj_id", analysisArtifactObjIds, connection));
+			}
+
+			if (!dataArtifactObjIds.isEmpty()) {
+				matches.addAll(getArtifactsForValues(BlackboardArtifact.Category.DATA_ARTIFACT, "artifacts.artifact_obj_id", dataArtifactObjIds, connection));
 			}
+
 			return matches;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting attributes using this where clause: " + whereClause, ex);
 		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -5212,25 +5105,25 @@ public ArrayList<BlackboardArtifact> getMatchingArtifacts(String whereClause) th
 	 */
 	@Deprecated
 	public BlackboardArtifact newBlackboardArtifact(int artifactTypeID, long obj_id) throws TskCoreException {
-		BlackboardArtifact.Type type = getArtifactType(artifactTypeID);
+		BlackboardArtifact.Type type = blackboard.getArtifactType(artifactTypeID);
 		if (type == null) {
 			throw new TskCoreException("Unknown artifact type for id: " + artifactTypeID);
 		}
-		
+
 		Category category = type.getCategory();
 		if (category == null) {
-			throw new TskCoreException(String.format("No category for %s (id: %d)", 
+			throw new TskCoreException(String.format("No category for %s (id: %d)",
 					type.getDisplayName() == null ? "<null>" : type.getDisplayName(),
 					type.getTypeID()));
 		}
-		
+
 		Content content = getContentById(obj_id);
 		if (content == null) {
 			throw new TskCoreException("No content found for object id: " + obj_id);
 		}
-		
+
 		switch (category) {
-			case ANALYSIS_RESULT: 
+			case ANALYSIS_RESULT:
 				return content.newAnalysisResult(type, Score.SCORE_UNKNOWN, null, null, null, Collections.emptyList())
 						.getAnalysisResult();
 			case DATA_ARTIFACT:
@@ -5276,7 +5169,7 @@ public BlackboardArtifact newBlackboardArtifact(ARTIFACT_TYPE artifactType, long
 	@Deprecated
 	@SuppressWarnings("deprecation")
 	BlackboardArtifact newBlackboardArtifact(int artifactTypeID, long obj_id, long data_source_obj_id) throws TskCoreException {
-		BlackboardArtifact.Type type = getArtifactType(artifactTypeID);
+		BlackboardArtifact.Type type = blackboard.getArtifactType(artifactTypeID);
 		try (CaseDbConnection connection = connections.getConnection()) {
 			return newBlackboardArtifact(artifactTypeID, obj_id, type.getTypeName(), type.getDisplayName(), data_source_obj_id, connection);
 		}
@@ -5314,23 +5207,33 @@ PreparedStatement createInsertArtifactStatement(int artifact_type_id, long obj_i
 		return statement;
 	}
 
+	/**
+	 * Add a new blackboard artifact with the given type.
+	 *
+	 * @param artifact_type_id    The type the given artifact should have.
+	 * @param obj_id              The parent content id.
+	 * @param artifactTypeName    The artifact type name.
+	 * @param artifactDisplayName The artifact type display name.
+	 * @param data_source_obj_id  The id of the artifact data source.
+	 * @param connection          The CaseDBConnection.
+	 *
+	 * @return A new blackboard artifact.
+	 *
+	 * @throws TskCoreException
+	 *
+	 * @deprecated Use type specific methods in Blackboard.
+	 */
 	@Deprecated
-	BlackboardArtifact newBlackboardArtifact(int artifact_type_id, long obj_id, String artifactTypeName, String artifactDisplayName, long data_source_obj_id, CaseDbConnection connection) throws TskCoreException {
-		acquireSingleUserCaseWriteLock();
+	private BlackboardArtifact newBlackboardArtifact(int artifact_type_id, long obj_id, String artifactTypeName, String artifactDisplayName, long data_source_obj_id, CaseDbConnection connection) throws TskCoreException {
+		BlackboardArtifact.Type type = blackboard.getArtifactType(artifact_type_id);
 		try {
-			long artifact_obj_id = addObject(obj_id, TskData.ObjectType.ARTIFACT.getObjectType(), connection);
-			PreparedStatement statement = createInsertArtifactStatement(artifact_type_id, obj_id, artifact_obj_id, data_source_obj_id, connection);
-
-			connection.executeUpdate(statement);
-			try (ResultSet resultSet = statement.getGeneratedKeys()) {
-				resultSet.next();
-				return new BlackboardArtifact(this, resultSet.getLong(1), //last_insert_rowid()
-						obj_id, artifact_obj_id, data_source_obj_id, artifact_type_id, artifactTypeName, artifactDisplayName, BlackboardArtifact.ReviewStatus.UNDECIDED, true);
+			if (type.getCategory() == BlackboardArtifact.Category.ANALYSIS_RESULT) {
+				return blackboard.newAnalysisResult(type, obj_id, data_source_obj_id, Score.SCORE_UNKNOWN, null, null, null, Collections.emptyList()).getAnalysisResult();
+			} else {
+				return blackboard.newDataArtifact(type, obj_id, data_source_obj_id, Collections.emptyList(), null);
 			}
-		} catch (SQLException ex) {
+		} catch (BlackboardException ex) {
 			throw new TskCoreException("Error creating a blackboard artifact", ex);
-		} finally {
-			releaseSingleUserCaseWriteLock();
 		}
 	}
 
@@ -5430,7 +5333,7 @@ boolean getContentHasChildren(Content content) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(obj_id) AS count FROM tsk_objects WHERE par_obj_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_CHILD_OBJECTS_BY_PARENT);
 			statement.clearParameters();
@@ -5473,7 +5376,7 @@ int getContentChildrenCount(Content content) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(obj_id) AS count FROM tsk_objects WHERE par_obj_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_CHILD_OBJECTS_BY_PARENT);
 			statement.clearParameters();
@@ -5510,7 +5413,7 @@ List<Content> getAbstractFileChildren(Content parent, TSK_DB_FILES_TYPE_ENUM typ
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILES_BY_PARENT_AND_TYPE);
 			statement.clearParameters();
 			long parentId = parent.getId();
@@ -5526,7 +5429,7 @@ List<Content> getAbstractFileChildren(Content parent, TSK_DB_FILES_TYPE_ENUM typ
 			releaseSingleUserCaseReadLock();
 		}
 	}
-	
+
 	/**
 	 * Returns the list of all AbstractFile Children for a given
 	 * AbstractFileParent
@@ -5542,7 +5445,7 @@ List<Content> getAbstractFileChildren(Content parent) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILES_BY_PARENT);
 			statement.clearParameters();
 			long parentId = parent.getId();
@@ -5575,7 +5478,7 @@ List<Long> getAbstractFileChildrenIds(Content parent, TSK_DB_FILES_TYPE_ENUM typ
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILE_IDS_BY_PARENT_AND_TYPE);
 			statement.clearParameters();
 			statement.setLong(1, parent.getId());
@@ -5610,7 +5513,7 @@ List<Long> getAbstractFileChildrenIds(Content parent) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILE_IDS_BY_PARENT);
 			statement.clearParameters();
 			statement.setLong(1, parent.getId());
@@ -5645,7 +5548,7 @@ List<Long> getBlackboardArtifactChildrenIds(Content parent) throws TskCoreExcept
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ARTIFACT_OBJECTIDS_BY_PARENT);
 			statement.clearParameters();
 			statement.setLong(1, parent.getId());
@@ -5674,12 +5577,10 @@ List<Long> getBlackboardArtifactChildrenIds(Content parent) throws TskCoreExcept
 	 * @throws TskCoreException
 	 */
 	List<Content> getBlackboardArtifactChildren(Content parent) throws TskCoreException {
-
 		long parentId = parent.getId();
-		ArrayList<BlackboardArtifact> artsArray = getArtifactsHelper("blackboard_artifacts.obj_id = " + parentId + ";");
-
-		List<Content> lc = new ArrayList<Content>();
-		lc.addAll(artsArray);
+		List<Content> lc = new ArrayList<>();
+		lc.addAll(blackboard.getAnalysisResults(parentId));
+		lc.addAll(blackboard.getDataArtifactsBySource(parentId));
 		return lc;
 	}
 
@@ -5819,7 +5720,7 @@ public Content getContentById(long id) throws TskCoreException {
 
 		long parentId;
 		TskData.ObjectType type;
-		
+
 		CaseDbConnection connection = null;
 		Statement s = null;
 		ResultSet rs = null;
@@ -5908,7 +5809,7 @@ String getFilePath(long id) {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_LOCAL_PATH_FOR_FILE);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -6027,7 +5928,7 @@ String getFileName(long objectId, CaseDbConnection connection) {
 	 *                          method could not be queried
 	 */
 	DerivedFile.DerivedMethod getDerivedMethod(long id) throws TskCoreException {
-		
+
 		DerivedFile.DerivedMethod method = null;
 		CaseDbConnection connection = null;
 		ResultSet rs1 = null;
@@ -6035,7 +5936,7 @@ DerivedFile.DerivedMethod getDerivedMethod(long id) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_DERIVED_FILE);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -6136,7 +6037,7 @@ public BlackboardArtifact getArtifactById(long id) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// get the artifact type.
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ARTIFACT_TYPE_BY_ARTIFACT_OBJ_ID);
 			statement.clearParameters();
@@ -6148,7 +6049,7 @@ public BlackboardArtifact getArtifactById(long id) throws TskCoreException {
 			}
 
 			// based on the artifact type category, get the analysis result or the data artifact
-			BlackboardArtifact.Type artifactType = getArtifactType(rs.getInt("artifact_type_id"));
+			BlackboardArtifact.Type artifactType = blackboard.getArtifactType(rs.getInt("artifact_type_id"));
 			switch (artifactType.getCategory()) {
 				case ANALYSIS_RESULT:
 					return blackboard.getAnalysisResultById(id);
@@ -6176,29 +6077,32 @@ public BlackboardArtifact getArtifactById(long id) throws TskCoreException {
 	 *
 	 * @throws TskCoreException thrown if critical error occurred within tsk
 	 *                          core and file could not be queried
+	 *
+	 * @deprecated	Use the type specific methods in Blackboard
+	 * getAnalysisResultById and getDataArtifactById
 	 */
+	@Deprecated
 	public BlackboardArtifact getArtifactByArtifactId(long id) throws TskCoreException {
-		CaseDbConnection connection = null;
-		ResultSet rs = null;
+		String query = "SELECT artifact_type_id, artifact_obj_id FROM blackboard_artifacts WHERE artifact_id = " + id;
 		acquireSingleUserCaseReadLock();
-		try {
-			connection = connections.getConnection();
-			
-			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ARTIFACT_BY_ARTIFACT_ID);
-			statement.clearParameters();
-			statement.setLong(1, id);
-			rs = connection.executeQuery(statement);
-			List<BlackboardArtifact> artifacts = resultSetToArtifacts(rs);
-			if (artifacts.size() > 0) {
-				return artifacts.get(0);
-			} else {
-				return null;
+
+		try (CaseDbConnection connection = connections.getConnection();
+				Statement statement = connection.createStatement();
+				ResultSet resultSet = statement.executeQuery(query);) {
+			if (resultSet != null && resultSet.next()) {
+				BlackboardArtifact.Type artifactType = blackboard.getArtifactType(resultSet.getInt("artifact_type_id"));
+				long artifactObjId = resultSet.getLong("artifact_obj_id");
+				switch (artifactType.getCategory()) {
+					case ANALYSIS_RESULT:
+						return blackboard.getAnalysisResultById(artifactObjId);
+					case DATA_ARTIFACT:
+						return blackboard.getDataArtifactById(artifactObjId);
+				}
 			}
+			return null;
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error getting artifacts by artifact id, artifact id = " + id, ex);
 		} finally {
-			closeResultSet(rs);
-			closeConnection(connection);
 			releaseSingleUserCaseReadLock();
 		}
 	}
@@ -6271,13 +6175,14 @@ public boolean isFileFromSource(Content dataSource, long fileId) throws TskCoreE
 			releaseSingleUserCaseReadLock();
 		}
 	}
-	
+
 	/**
-	 * Returns true if the string contains a SQL LIKE statement wild card based 
-	 * on https://www.postgresql.org/docs/9.5/functions-matching.html and 
+	 * Returns true if the string contains a SQL LIKE statement wild card based
+	 * on https://www.postgresql.org/docs/9.5/functions-matching.html and
 	 * https://sqlite.org/lang_expr.html#the_like_glob_regexp_and_match_operators.
-	 * 
+	 *
 	 * @param str The string.
+	 *
 	 * @return True if it contains a LIKE wild card.
 	 */
 	private static boolean containsLikeWildcard(String str) {
@@ -6302,16 +6207,16 @@ private static boolean containsLikeWildcard(String str) {
 	public List<AbstractFile> findFiles(Content dataSource, String fileName) throws TskCoreException {
 		String ext = "";
 		if (!containsLikeWildcard(fileName)) {
-			ext = SleuthkitCase.extractExtension(fileName);	
+			ext = SleuthkitCase.extractExtension(fileName);
 		}
-		
+
 		List<AbstractFile> files = new ArrayList<>();
 		CaseDbConnection connection = null;
 		ResultSet resultSet = null;
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement;
 			if (ext.isEmpty()) {
 				statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILES_BY_DATA_SOURCE_AND_NAME);
@@ -6354,9 +6259,9 @@ public List<AbstractFile> findFiles(Content dataSource, String fileName) throws
 	public List<AbstractFile> findFiles(Content dataSource, String fileName, String dirSubString) throws TskCoreException {
 		String ext = "";
 		if (!containsLikeWildcard(fileName)) {
-			ext = SleuthkitCase.extractExtension(fileName);	
+			ext = SleuthkitCase.extractExtension(fileName);
 		}
-		
+
 		List<AbstractFile> files = new ArrayList<>();
 		CaseDbConnection connection = null;
 		ResultSet resultSet = null;
@@ -6378,7 +6283,7 @@ public List<AbstractFile> findFiles(Content dataSource, String fileName, String
 				statement.setString(3, "%" + dirSubString.toLowerCase() + "%"); //NON-NLS
 				statement.setLong(4, dataSource.getId());
 			}
-			
+
 			resultSet = connection.executeQuery(statement);
 			files.addAll(resultSetToAbstractFiles(resultSet, connection));
 		} catch (SQLException e) {
@@ -6493,7 +6398,11 @@ public VirtualDirectory addVirtualDirectory(long parentId, String directoryName,
 			Content parent = this.getAbstractFileById(parentId, connection);
 			if (parent instanceof AbstractFile) {
 				if (isRootDirectory((AbstractFile) parent, transaction)) {
-					parentPath = "/";
+					if (parent.getName().isEmpty()) {
+						parentPath = "/";
+					} else {
+						parentPath = "/" + parent.getName() + "/";
+					}
 				} else {
 					parentPath = ((AbstractFile) parent).getParentPath() + parent.getName() + "/"; //NON-NLS
 				}
@@ -6507,19 +6416,21 @@ public VirtualDirectory addVirtualDirectory(long parentId, String directoryName,
 
 			// Insert a row for the virtual directory into the tsk_files table.
 			// INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path, dir_type, meta_type,
-			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, known, mime_type, parent_path, data_source_obj_id,extension,owner_uid, os_account_obj_id)
-			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?,?,?)
+			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type, parent_path, data_source_obj_id,extension,owner_uid, os_account_obj_id)
+			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?,?,?,?)
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 			statement.clearParameters();
 			statement.setLong(1, newObjId);
 
 			// If the parent is part of a file system, grab its file system ID
+			Long fileSystemObjectId = null;
 			if (0 != parentId) {
-				long parentFs = this.getFileSystemId(parentId, connection);
-				if (parentFs != -1) {
-					statement.setLong(2, parentFs);
+				fileSystemObjectId = this.getFileSystemId(parentId, connection);
+				if (fileSystemObjectId != -1) {
+					statement.setLong(2, fileSystemObjectId);
 				} else {
 					statement.setNull(2, java.sql.Types.BIGINT);
+					fileSystemObjectId = null;
 				}
 			} else {
 				statement.setNull(2, java.sql.Types.BIGINT);
@@ -6556,11 +6467,13 @@ public VirtualDirectory addVirtualDirectory(long parentId, String directoryName,
 
 			statement.setNull(15, java.sql.Types.VARCHAR); // MD5
 			statement.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-			statement.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-			statement.setNull(18, java.sql.Types.VARCHAR); // MIME type	
+			statement.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+			
+			statement.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+			statement.setNull(19, java.sql.Types.VARCHAR); // MIME type	
 
 			// parent path
-			statement.setString(19, parentPath);
+			statement.setString(20, parentPath);
 
 			// data source object id (same as object id if this is a data source)
 			long dataSourceObjectId;
@@ -6569,18 +6482,19 @@ public VirtualDirectory addVirtualDirectory(long parentId, String directoryName,
 			} else {
 				dataSourceObjectId = getDataSourceObjectId(connection, parentId);
 			}
-			statement.setLong(20, dataSourceObjectId);
+			statement.setLong(21, dataSourceObjectId);
 
 			//extension, since this is not really file we just set it to null
-			statement.setString(21, null);
-
-			statement.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-			statement.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setString(22, null);
 
+			statement.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+			statement.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+			
 			connection.executeUpdate(statement);
 
-			return new VirtualDirectory(this, newObjId, dataSourceObjectId, directoryName, dirType,
-					metaType, dirFlag, metaFlags, null, null, FileKnown.UNKNOWN,
+			return new VirtualDirectory(this, newObjId, dataSourceObjectId, fileSystemObjectId, directoryName, dirType,
+					metaType, dirFlag, metaFlags, null, null, null, FileKnown.UNKNOWN,
 					parentPath);
 		} catch (SQLException e) {
 			throw new TskCoreException("Error creating virtual directory '" + directoryName + "'", e);
@@ -6656,8 +6570,8 @@ public LocalDirectory addLocalDirectory(long parentId, String directoryName, Cas
 
 			// Insert a row for the local directory into the tsk_files table.
 			// INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path, dir_type, meta_type,
-			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id)
-			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id, collected)
+			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 			statement.clearParameters();
 			statement.setLong(1, newObjId);
@@ -6696,26 +6610,29 @@ public LocalDirectory addLocalDirectory(long parentId, String directoryName, Cas
 
 			statement.setNull(15, java.sql.Types.VARCHAR); // MD5
 			statement.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-			statement.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-			statement.setNull(18, java.sql.Types.VARCHAR); // MIME type			
+			statement.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+						
+			statement.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+			statement.setNull(19, java.sql.Types.VARCHAR); // MIME type			
 
 			// parent path
-			statement.setString(19, parentPath);
+			statement.setString(20, parentPath);
 
 			// data source object id
 			long dataSourceObjectId = getDataSourceObjectId(connection, parentId);
-			statement.setLong(20, dataSourceObjectId);
+			statement.setLong(21, dataSourceObjectId);
 
 			//extension, since this is a directory we just set it to null
-			statement.setString(21, null);
+			statement.setString(22, null);
 
-			statement.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-			statement.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+			statement.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
 
 			connection.executeUpdate(statement);
 
 			return new LocalDirectory(this, newObjId, dataSourceObjectId, directoryName, dirType,
-					metaType, dirFlag, metaFlags, null, null, FileKnown.UNKNOWN,
+					metaType, dirFlag, metaFlags, null, null, null, FileKnown.UNKNOWN,
 					parentPath);
 		} catch (SQLException e) {
 			throw new TskCoreException("Error creating local directory '" + directoryName + "'", e);
@@ -6793,8 +6710,8 @@ public LocalFilesDataSource addLocalFilesDataSource(String deviceId, String root
 			// its own object id.
 			// INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path,
 			// dir_type, meta_type, dir_flags, meta_flags, size, ctime, crtime,
-			// atime, mtime, md5, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id)
-			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?)
+			// atime, mtime, md5, sha256, sha1, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id, collected)
+			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 			PreparedStatement preparedStatement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 			preparedStatement.clearParameters();
 			preparedStatement.setLong(1, newObjId);
@@ -6818,17 +6735,21 @@ public LocalFilesDataSource addLocalFilesDataSource(String deviceId, String root
 			preparedStatement.setNull(14, java.sql.Types.BIGINT);
 			preparedStatement.setNull(15, java.sql.Types.VARCHAR); // MD5
 			preparedStatement.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-			preparedStatement.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-			preparedStatement.setNull(18, java.sql.Types.VARCHAR); // MIME type	
+			preparedStatement.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+			preparedStatement.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+			preparedStatement.setNull(19, java.sql.Types.VARCHAR); // MIME type	
 			String parentPath = "/"; //NON-NLS
-			preparedStatement.setString(19, parentPath);
-			preparedStatement.setLong(20, newObjId);
-			preparedStatement.setString(21, null); //extension, just set it to null
-			preparedStatement.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-			preparedStatement.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+			preparedStatement.setString(20, parentPath);
+			preparedStatement.setLong(21, newObjId);
+			preparedStatement.setString(22, null); //extension, just set it to null
+			preparedStatement.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+			preparedStatement.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+			preparedStatement.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+			
+			
 			connection.executeUpdate(preparedStatement);
 
-			return new LocalFilesDataSource(this, newObjId, newObjId, deviceId, rootDirectoryName, dirType, metaType, dirFlag, metaFlags, timeZone, null, null, FileKnown.UNKNOWN, parentPath);
+			return new LocalFilesDataSource(this, newObjId, newObjId, deviceId, rootDirectoryName, dirType, metaType, dirFlag, metaFlags, timeZone, null, null, null, FileKnown.UNKNOWN, parentPath);
 
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Error creating local files data source with device id %s and directory name %s", deviceId, rootDirectoryName), ex);
@@ -7237,7 +7158,135 @@ public FsContent addFileSystemFile(long dataSourceObjId, long fsObjId,
 			boolean isFile, Content parent, String ownerUid,
 			OsAccount osAccount, List<Attribute> fileAttributes,
 			CaseDbTransaction transaction) throws TskCoreException {
-
+		
+		return addFileSystemFile(dataSourceObjId, fsObjId,
+				fileName,
+				metaAddr, metaSeq,
+				attrType, attrId,
+				dirFlag, metaFlags, size,
+				ctime, crtime, atime, mtime,
+				md5Hash, sha256Hash, null,
+				mimeType,
+				isFile, parent, ownerUid,
+				osAccount, fileAttributes,
+				transaction);
+	}
+	
+	/**
+	 * Add a file system file.
+	 *
+	 * @param dataSourceObjId The object id of the root data source of this
+	 *                        file.
+	 * @param fsObjId         The file system object id.
+	 * @param fileName        The name of the file.
+	 * @param metaAddr        The meta address of the file.
+	 * @param metaSeq         The meta address sequence of the file.
+	 * @param attrType        The attributed type of the file.
+	 * @param attrId          The attribute id.
+	 * @param dirFlag         The allocated status from the name structure
+	 * @param metaFlags       The allocated status of the file, usually as
+	 *                        reported in the metadata structure of the file
+	 *                        system.
+	 * @param size            The size of the file in bytes.
+	 * @param ctime           The changed time of the file.
+	 * @param crtime          The creation time of the file.
+	 * @param atime           The accessed time of the file
+	 * @param mtime           The modified time of the file.
+	 * @param md5Hash         The MD5 hash of the file
+	 * @param sha256Hash      The SHA256 hash of the file
+	 * @param sha1Hash        SHA1 Hash of the file. May be null.
+	 * @param mimeType        The MIME type of the file
+	 * @param isFile          True, unless the file is a directory.
+	 * @param parent          The parent of the file (e.g., a virtual
+	 *                        directory).
+	 * @param ownerUid        UID of the file owner as found in the file system,
+	 *                        can be null.
+	 * @param osAccount       OS account of owner, may be null.
+	 * @param fileAttributes  A list of file attributes. May be empty.
+	 *
+	 * @param transaction     A caller-managed transaction within which the add
+	 *                        file operations are performed.
+	 *
+	 * @return Newly created file
+	 *
+	 * @throws TskCoreException
+	 */
+	public FsContent addFileSystemFile(long dataSourceObjId, long fsObjId,
+			String fileName,
+			long metaAddr, int metaSeq,
+			TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
+			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size,
+			long ctime, long crtime, long atime, long mtime,
+			String md5Hash, String sha256Hash, String sha1Hash,
+			String mimeType, boolean isFile,
+			Content parent, String ownerUid,
+			OsAccount osAccount, List<Attribute> fileAttributes, 
+			CaseDbTransaction transaction) throws TskCoreException {
+		return addFileSystemFile(dataSourceObjId, fsObjId,
+				fileName,
+				metaAddr, metaSeq,
+				attrType, attrId,
+				dirFlag, metaFlags, size,
+				ctime, crtime, atime, mtime,
+				md5Hash, sha256Hash, sha1Hash,
+				mimeType,
+				isFile, parent, ownerUid,
+				osAccount, TskData.CollectedStatus.UNKNOWN, fileAttributes,
+				transaction);
+	}
+	
+	/**
+	 * Add a file system file.
+	 *
+	 * @param dataSourceObjId The object id of the root data source of this
+	 *                        file.
+	 * @param fsObjId         The file system object id.
+	 * @param fileName        The name of the file.
+	 * @param metaAddr        The meta address of the file.
+	 * @param metaSeq         The meta address sequence of the file.
+	 * @param attrType        The attributed type of the file.
+	 * @param attrId          The attribute id.
+	 * @param dirFlag         The allocated status from the name structure
+	 * @param metaFlags       The allocated status of the file, usually as
+	 *                        reported in the metadata structure of the file
+	 *                        system.
+	 * @param size            The size of the file in bytes.
+	 * @param ctime           The changed time of the file.
+	 * @param crtime          The creation time of the file.
+	 * @param atime           The accessed time of the file
+	 * @param mtime           The modified time of the file.
+	 * @param md5Hash         The MD5 hash of the file
+	 * @param sha256Hash      The SHA256 hash of the file
+	 * @param sha1Hash        SHA1 Hash of the file. May be null.
+	 * @param mimeType        The MIME type of the file
+	 * @param isFile          True, unless the file is a directory.
+	 * @param parent          The parent of the file (e.g., a virtual
+	 *                        directory).
+	 * @param ownerUid        UID of the file owner as found in the file system,
+	 *                        can be null.
+	 * @param osAccount       OS account of owner, may be null.
+	 * @param collected       Collected status for file content; must not be null (use TskData.CollectedStatus.UNKNOWN if not known)
+	 * @param fileAttributes  A list of file attributes. May be empty.
+	 *
+	 * @param transaction     A caller-managed transaction within which the add
+	 *                        file operations are performed.
+	 *
+	 * @return Newly created file
+	 *
+	 * @throws TskCoreException
+	 */
+	public FsContent addFileSystemFile(long dataSourceObjId, long fsObjId,
+			String fileName,
+			long metaAddr, int metaSeq,
+			TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
+			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags, long size,
+			long ctime, long crtime, long atime, long mtime,
+			String md5Hash, String sha256Hash, String sha1Hash,
+			String mimeType, boolean isFile,
+			Content parent, String ownerUid,
+			OsAccount osAccount, TskData.CollectedStatus collected,
+			List<Attribute> fileAttributes, 
+			CaseDbTransaction transaction) throws TskCoreException {
 		TimelineManager timelineManager = getTimelineManager();
 
 		Statement queryStatement = null;
@@ -7285,24 +7334,28 @@ public FsContent addFileSystemFile(long dataSourceObjId, long fsObjId,
 			statement.setLong(19, mtime);
 			statement.setString(20, md5Hash);
 			statement.setString(21, sha256Hash);
-			statement.setString(22, mimeType);
-			statement.setString(23, parentPath);
+			statement.setString(22, sha1Hash);
+			statement.setString(23, mimeType);
+			statement.setString(24, parentPath);
 			final String extension = extractExtension(fileName);
-			statement.setString(24, extension);
-			statement.setString(25, ownerUid);
+			statement.setString(25, extension);
+			statement.setString(26, ownerUid);
 			if (null != osAccount) {
-				statement.setLong(26, osAccount.getId());
+				statement.setLong(27, osAccount.getId());
 			} else {
-				statement.setNull(26, java.sql.Types.BIGINT); // osAccountObjId
+				statement.setNull(27, java.sql.Types.BIGINT); // osAccountObjId
 			}
-
+			statement.setLong(28, collected.getType());
+			
 			connection.executeUpdate(statement);
 
 			Long osAccountId = (osAccount != null) ? osAccount.getId() : null;
-			DerivedFile derivedFile = new DerivedFile(this, objectId, dataSourceObjId, fileName, dirType, metaType, dirFlag, metaFlags,
-					size, ctime, crtime, atime, mtime, md5Hash, sha256Hash, null, parentPath, null, parent.getId(), mimeType, null, extension, ownerUid, osAccountId);
+			DerivedFile derivedFile = new DerivedFile(this, objectId, dataSourceObjId, fsObjId, fileName, dirType, metaType, dirFlag, metaFlags,
+					size, ctime, crtime, atime, mtime, md5Hash, sha256Hash, sha1Hash, null, parentPath, null, parent.getId(), mimeType, null, extension, ownerUid, osAccountId);
 
-			timelineManager.addEventsForNewFile(derivedFile, connection);
+			if (!timelineEventsDisabled.get()) {
+				timelineManager.addEventsForNewFile(derivedFile, connection);
+			}
 
 			for (Attribute fileAttribute : fileAttributes) {
 				fileAttribute.setAttributeParentId(objectId);
@@ -7311,15 +7364,15 @@ public FsContent addFileSystemFile(long dataSourceObjId, long fsObjId,
 			}
 
 			if (osAccount != null) {
-				osAccountManager.newOsAccountInstance(osAccount.getId(), dataSourceObjId, OsAccountInstance.OsAccountInstanceType.LAUNCHED, connection);
+				osAccountManager.newOsAccountInstance(osAccount.getId(), dataSourceObjId, OsAccountInstance.OsAccountInstanceType.ACCESSED, connection);
 			}
 
 			return new org.sleuthkit.datamodel.File(this, objectId, dataSourceObjId, fsObjId,
 					attrType, attrId, fileName, metaAddr, metaSeq,
 					dirType, metaType, dirFlag, metaFlags,
 					size, ctime, crtime, atime, mtime,
-					(short) 0, 0, 0, md5Hash, sha256Hash, null, parentPath, mimeType,
-					extension, ownerUid, osAccountId, fileAttributes);
+					(short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, null, parentPath, mimeType,
+					extension, ownerUid, osAccountId, collected, fileAttributes);
 
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Failed to INSERT file system file %s (%s) with parent id %d in tsk_files table", fileName, parentPath, parent.getId()), ex);
@@ -7385,6 +7438,13 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 		if (null == parent) {
 			throw new TskCoreException("Conent is null");
 		}
+		
+		String parentPath;
+		if (parent instanceof AbstractFile) {
+			parentPath = ((AbstractFile) parent).getParentPath() + parent.getName() + '/'; //NON-NLS
+		} else {
+			parentPath = "/";
+		}
 
 		CaseDbTransaction transaction = null;
 		Statement statement = null;
@@ -7393,8 +7453,19 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 		try {
 			transaction = beginTransaction();
 			CaseDbConnection connection = transaction.getConnection();
+			
+			// If the parent is part of a file system, grab its file system ID
+			Long fileSystemObjectId;
+			if (0 != parent.getId()) {
+				fileSystemObjectId = this.getFileSystemId(parent.getId(), connection);
+				if (fileSystemObjectId == -1) {
+					fileSystemObjectId = null;
+				}
+			} else {
+				fileSystemObjectId = null;
+			}
 
-			List<LayoutFile> fileRangeLayoutFiles = new ArrayList<LayoutFile>();
+			List<LayoutFile> fileRangeLayoutFiles = new ArrayList<>();
 			for (TskFileRange fileRange : fileRanges) {
 				/*
 				 * Insert a row for the Tsk file range into the tsk_objects
@@ -7407,15 +7478,19 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 				 * Insert a row for the Tsk file range into the tsk_files table:
 				 * INSERT INTO tsk_files (obj_id, fs_obj_id, name, type,
 				 * has_path, dir_type, meta_type, dir_flags, meta_flags, size,
-				 * ctime, crtime, atime, mtime, md5, known, mime_type,
+				 * ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type,
 				 * parent_path, data_source_obj_id,extension, owner_uid,
 				 * os_account_obj_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
-				 * ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?)
+				 * ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?, ?)
 				 */
 				PreparedStatement prepStmt = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 				prepStmt.clearParameters();
-				prepStmt.setLong(1, fileRangeId); // obj_id	from tsk_objects			
-				prepStmt.setNull(2, java.sql.Types.BIGINT); // fs_obj_id				
+				prepStmt.setLong(1, fileRangeId); // obj_id	from tsk_objects
+				if (fileSystemObjectId != null) {
+					prepStmt.setLong(2, fileSystemObjectId);// fs_obj_id
+				} else {
+					prepStmt.setNull(2, java.sql.Types.BIGINT); 	
+				}
 				prepStmt.setString(3, "Unalloc_" + parent.getId() + "_" + fileRange.getByteStart() + "_" + end_byte_in_parent); // name of form Unalloc_[image obj_id]_[start byte in parent]_[end byte in parent]
 				prepStmt.setShort(4, TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS.getFileType()); // type
 				prepStmt.setNull(5, java.sql.Types.BIGINT); // has_path
@@ -7430,17 +7505,20 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 				prepStmt.setNull(14, java.sql.Types.BIGINT); // mtime
 				prepStmt.setNull(15, java.sql.Types.VARCHAR); // MD5
 				prepStmt.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-				prepStmt.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-				prepStmt.setNull(18, java.sql.Types.VARCHAR); // MIME type
-				prepStmt.setNull(19, java.sql.Types.VARCHAR); // parent path
-				prepStmt.setLong(20, parent.getId()); // data_source_obj_id
+				prepStmt.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+				
+				prepStmt.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+				prepStmt.setNull(19, java.sql.Types.VARCHAR); // MIME type
+				prepStmt.setString(20, parentPath); // parent path
+				prepStmt.setLong(21, parent.getId()); // data_source_obj_id
 
 				//extension, since this is not a FS file we just set it to null
-				prepStmt.setString(21, null);
-
-				prepStmt.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-				prepStmt.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+				prepStmt.setString(22, null);
 
+				prepStmt.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+				prepStmt.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+				prepStmt.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+				
 				connection.executeUpdate(prepStmt);
 
 				/*
@@ -7462,6 +7540,7 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 				fileRangeLayoutFiles.add(new LayoutFile(this,
 						fileRangeId,
 						parent.getId(),
+						fileSystemObjectId,
 						Long.toString(fileRange.getSequence()),
 						TSK_DB_FILES_TYPE_ENUM.UNALLOC_BLOCKS,
 						TSK_FS_NAME_TYPE_ENUM.REG,
@@ -7470,7 +7549,7 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 						TSK_FS_META_FLAG_ENUM.UNALLOC.getValue(),
 						fileRange.getByteLen(),
 						0L, 0L, 0L, 0L,
-						null, null,
+						null, null, null,
 						FileKnown.UNKNOWN,
 						parent.getUniquePath(),
 						null,
@@ -7498,6 +7577,114 @@ public final List<LayoutFile> addLayoutFiles(Content parent, List<TskFileRange>
 		}
 	}
 
+	/**
+	 * Utility class to hold the current subfolder being used for carved files
+	 * and a count of how many files are in the folder. Note that this count
+	 * will not be accurate if multiple nodes are writing carved files to the
+	 * same folder at once.
+	 */
+	private class CarvedFileDirInfo {
+
+		final VirtualDirectory currentFolder;
+		AtomicInteger count;
+
+		CarvedFileDirInfo(VirtualDirectory currentFolder) {
+			this.currentFolder = currentFolder;
+			count = new AtomicInteger(0);
+		}
+
+		CarvedFileDirInfo(VirtualDirectory currentFolder, int count) {
+			this.currentFolder = currentFolder;
+			this.count = new AtomicInteger(count);
+		}
+
+		/**
+		 * Check if the folder is "full" and we should start a new subfolder.
+		 *
+		 * @return True if the maximum number of files have been written to the
+		 *         folder, false otherwise.
+		 */
+		boolean isFull() {
+			return count.get() >= MAX_CARVED_FILES_PER_FOLDER;
+		}
+
+		/**
+		 * Increment the file counter.
+		 */
+		void incrementFileCounter() {
+			count.incrementAndGet();
+		}
+	}
+
+	/**
+	 * Find the newest subfolder of $CarvedFiles and load its data.
+	 *
+	 * @param carvedFilesBaseDir The $CarvedFiles directory
+	 *
+	 * @return The subfolder of $CarvedFiles with the highest object ID.
+	 *
+	 * @throws TskCoreException
+	 */
+	private CarvedFileDirInfo getMostRecentCarvedDirInfo(VirtualDirectory carvedFilesBaseDir) throws TskCoreException {
+		VirtualDirectory mostRecentDir = null;
+		for (Content child : carvedFilesBaseDir.getChildren()) {
+			if (isValidCarvedFileSubfolder(child)) {
+				if (mostRecentDir == null
+						|| (mostRecentDir.getId() < child.getId())) {
+					mostRecentDir = (VirtualDirectory) child;
+				}
+			}
+		}
+
+		if (mostRecentDir != null) {
+			return new CarvedFileDirInfo(mostRecentDir, mostRecentDir.getChildrenCount());
+		}
+		return null;
+	}
+
+	/**
+	 * Check if the name of the folder matches the expected pattern for a
+	 * subfolder and is a virtual directory.
+	 *
+	 * @param subfolder The subfolder to test.
+	 *
+	 * @return true if the format appears valid, false otherwise.
+	 */
+	private boolean isValidCarvedFileSubfolder(Content subfolder) {
+		if (!(subfolder instanceof VirtualDirectory)) {
+			return false;
+		}
+		return subfolder.getName().matches("^[0-9]+$");
+	}
+
+	/**
+	 * Create the next carved files subfolder. If the current subfolder is
+	 * given, the new subfolder will be one higher than the name of the current
+	 * subfolder.
+	 *
+	 * @param carvedFilesBaseDir   The base $CarvedFiles folder.
+	 * @param currentSubfolderInfo Optional name of the current subfolder in use
+	 *                             (can be null).
+	 *
+	 * @return The new subfolder for carved files.
+	 *
+	 * @throws TskCoreException
+	 */
+	private CarvedFileDirInfo createCarvedFilesSubfolder(Content carvedFilesBaseDir, CarvedFileDirInfo currentSubfolderInfo) throws TskCoreException {
+		int nextIndex = 1;
+		if (currentSubfolderInfo != null) {
+			try {
+				int currentIndex = Integer.parseInt(currentSubfolderInfo.currentFolder.getName());
+				nextIndex = currentIndex + 1;
+			} catch (NumberFormatException ex) {
+				throw new TskCoreException("Unexpected name format for carved files subdirectory with ID: " + currentSubfolderInfo.currentFolder.getId() + " (" + currentSubfolderInfo.currentFolder.getName() + ")", ex);
+			}
+		}
+
+		VirtualDirectory carvedFilesSubdir = addVirtualDirectory(carvedFilesBaseDir.getId(), Integer.toString(nextIndex));
+		return new CarvedFileDirInfo(carvedFilesSubdir);
+	}
+
 	/**
 	 * Adds a carving result to the case database.
 	 *
@@ -7548,10 +7735,20 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 			 * Get or create the $CarvedFiles virtual directory for the root
 			 * ancestor.
 			 */
-			VirtualDirectory carvedFilesDir;
-			synchronized(carvedFileDirsLock) {
-				carvedFilesDir = rootIdsToCarvedFileDirs.get(root.getId());
-				if (null == carvedFilesDir) {
+			CarvedFileDirInfo carvedFilesDirInfo = null;
+			synchronized (carvedFileDirsLock) {
+				// Get the subfolder currently in use (if there is one)
+				carvedFilesDirInfo = rootIdsToCarvedFileDirs.get(root.getId());
+				if (carvedFilesDirInfo != null) {
+					carvedFilesDirInfo.incrementFileCounter();
+
+					// If the current folder is full, create a new one.
+					if (carvedFilesDirInfo.isFull()) {
+						carvedFilesDirInfo = createCarvedFilesSubfolder(carvedFilesDirInfo.currentFolder.getParent(), carvedFilesDirInfo);
+					}
+				}
+
+				if (null == carvedFilesDirInfo) {
 					List<Content> rootChildren;
 					if (root instanceof FileSystem) {
 						rootChildren = ((FileSystem) root).getRootDirectory().getChildren();
@@ -7560,20 +7757,40 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 					}
 					for (Content child : rootChildren) {
 						if (child instanceof VirtualDirectory && child.getName().equals(VirtualDirectory.NAME_CARVED)) {
-							carvedFilesDir = (VirtualDirectory) child;
+
+							VirtualDirectory baseDir = (VirtualDirectory) child;
+
+							// Get the most recent subfolder in the carved files folder.
+							carvedFilesDirInfo = getMostRecentCarvedDirInfo(baseDir);
+
+							// If there are no subfolders, create one.
+							if (carvedFilesDirInfo == null) {
+								carvedFilesDirInfo = createCarvedFilesSubfolder(baseDir, null);
+							}
+
+							// If there are already too many files in the subfolder, create a new one.
+							if (carvedFilesDirInfo.isFull()) {
+								carvedFilesDirInfo = createCarvedFilesSubfolder(baseDir, carvedFilesDirInfo);
+							}
+
+							rootIdsToCarvedFileDirs.put(root.getId(), carvedFilesDirInfo);
 							break;
 						}
 					}
-					if (null == carvedFilesDir) {
+					if (carvedFilesDirInfo == null) {
+						// If we get here, we didn't have a carved files base folder in the case, so we need to make that and 
+						// the first subfolder.
+
 						long parId = root.getId();
 						// $CarvedFiles should be a child of the root directory, not the file system
 						if (root instanceof FileSystem) {
 							Content rootDir = ((FileSystem) root).getRootDirectory();
 							parId = rootDir.getId();
 						}
-						carvedFilesDir = addVirtualDirectory(parId, VirtualDirectory.NAME_CARVED);
+						VirtualDirectory carvedFilesBaseDir = addVirtualDirectory(parId, VirtualDirectory.NAME_CARVED);
+						carvedFilesDirInfo = createCarvedFilesSubfolder(carvedFilesBaseDir, null);
+						rootIdsToCarvedFileDirs.put(root.getId(), carvedFilesDirInfo);
 					}
-					rootIdsToCarvedFileDirs.put(root.getId(), carvedFilesDir);
 				}
 			}
 
@@ -7581,33 +7798,67 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 			 * Add the carved files to the database as children of the
 			 * $CarvedFile directory of the root ancestor.
 			 */
+			VirtualDirectory carvedFilesBaseDir = (VirtualDirectory) carvedFilesDirInfo.currentFolder.getParent();
 			transaction = beginTransaction();
 			CaseDbConnection connection = transaction.getConnection();
-			String parentPath = getFileParentPath(carvedFilesDir.getId(), connection) + carvedFilesDir.getName() + "/";
+			String parentPath = getFileParentPath(carvedFilesDirInfo.currentFolder.getId(), connection) + carvedFilesDirInfo.currentFolder.getName() + "/";
 			List<LayoutFile> carvedFiles = new ArrayList<>();
 			for (CarvingResult.CarvedFile carvedFile : carvingResult.getCarvedFiles()) {
+
+				/*
+				 * Check if we need to change to a new subfolder.
+				 */
+				VirtualDirectory carvedFilesDir = carvedFilesDirInfo.currentFolder;
+				if (carvedFilesDirInfo.isFull()) {
+					// To prevent deadlocks involving the case write lock and the carvedFileDirsLock, 
+					// commit the current transaction and then start a new one
+					// after switching to the new folder.
+					transaction.commit();
+
+					synchronized (carvedFileDirsLock) {
+						// Get the current copy from the map - another thread may have just created a new folder.
+						carvedFilesDirInfo = rootIdsToCarvedFileDirs.get(root.getId());
+						if (carvedFilesDirInfo.isFull()) {
+							carvedFilesDirInfo = createCarvedFilesSubfolder(carvedFilesBaseDir, carvedFilesDirInfo);
+							rootIdsToCarvedFileDirs.put(root.getId(), carvedFilesDirInfo);
+							carvedFilesDir = carvedFilesDirInfo.currentFolder;
+						}
+					}
+
+					// Start a new transaction.
+					transaction = beginTransaction();
+					connection = transaction.getConnection();
+					parentPath = getFileParentPath(carvedFilesDir.getId(), connection) + carvedFilesDir.getName() + "/";
+
+				}
+				carvedFilesDirInfo.incrementFileCounter();
+
 				/*
 				 * Insert a row for the carved file into the tsk_objects table:
 				 * INSERT INTO tsk_objects (par_obj_id, type) VALUES (?, ?)
 				 */
 				long carvedFileId = addObject(carvedFilesDir.getId(), TskData.ObjectType.ABSTRACTFILE.getObjectType(), connection);
 
+
 				/*
 				 * Insert a row for the carved file into the tsk_files table:
 				 * INSERT INTO tsk_files (obj_id, fs_obj_id, name, type,
 				 * has_path, dir_type, meta_type, dir_flags, meta_flags, size,
-				 * ctime, crtime, atime, mtime, md5, known, mime_type,
+				 * ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type,
 				 * parent_path, data_source_obj_id,extenion, owner_uid,
 				 * os_account_obj_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
-				 * ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+				 * ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 				 */
 				PreparedStatement prepStmt = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 				prepStmt.clearParameters();
 				prepStmt.setLong(1, carvedFileId); // obj_id
+				Long fileSystemObjectId;
 				if (root instanceof FileSystem) {
 					prepStmt.setLong(2, root.getId()); // fs_obj_id
+					fileSystemObjectId = root.getId();
 				} else {
 					prepStmt.setNull(2, java.sql.Types.BIGINT); // fs_obj_id
+					fileSystemObjectId = null;
 				}
 				prepStmt.setString(3, carvedFile.getName()); // name
 				prepStmt.setShort(4, TSK_DB_FILES_TYPE_ENUM.CARVED.getFileType()); // type
@@ -7623,15 +7874,18 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 				prepStmt.setNull(14, java.sql.Types.BIGINT); // mtime
 				prepStmt.setNull(15, java.sql.Types.VARCHAR); // MD5
 				prepStmt.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-				prepStmt.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-				prepStmt.setNull(18, java.sql.Types.VARCHAR); // MIME type	
-				prepStmt.setString(19, parentPath); // parent path
-				prepStmt.setLong(20, carvedFilesDir.getDataSourceObjectId()); // data_source_obj_id
-				prepStmt.setString(21, extractExtension(carvedFile.getName())); //extension
-
-				prepStmt.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-				prepStmt.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
-
+				prepStmt.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+				
+				prepStmt.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+				prepStmt.setNull(19, java.sql.Types.VARCHAR); // MIME type	
+				prepStmt.setString(20, parentPath); // parent path
+				prepStmt.setLong(21, carvedFilesDir.getDataSourceObjectId()); // data_source_obj_id
+				prepStmt.setString(22, extractExtension(carvedFile.getName())); //extension
+
+				prepStmt.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+				prepStmt.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+				prepStmt.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+				
 				connection.executeUpdate(prepStmt);
 
 				/*
@@ -7655,6 +7909,7 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 				carvedFiles.add(new LayoutFile(this,
 						carvedFileId,
 						carvedFilesDir.getDataSourceObjectId(),
+						fileSystemObjectId,
 						carvedFile.getName(),
 						TSK_DB_FILES_TYPE_ENUM.CARVED,
 						TSK_FS_NAME_TYPE_ENUM.REG,
@@ -7663,7 +7918,7 @@ public final List<LayoutFile> addCarvedFiles(CarvingResult carvingResult) throws
 						TSK_FS_META_FLAG_ENUM.UNALLOC.getValue(),
 						carvedFile.getSizeInBytes(),
 						0L, 0L, 0L, 0L,
-						null, null,
+						null, null, null,
 						FileKnown.UNKNOWN,
 						parentPath,
 						null,
@@ -7740,7 +7995,7 @@ public DerivedFile addDerivedFile(String fileName, String localPath,
 			throw ex;
 		}
 	}
-	
+
 	public DerivedFile addDerivedFile(String fileName, String localPath,
 			long size, long ctime, long crtime, long atime, long mtime,
 			boolean isFile, Content parentObj,
@@ -7775,10 +8030,11 @@ public DerivedFile addDerivedFile(String fileName, String localPath,
 			statement.setLong(1, newObjId);
 
 			// If the parentFile is part of a file system, use its file system object ID.
-			long fsObjId = this.getFileSystemId(parentId, connection);
+			Long fsObjId = this.getFileSystemId(parentId, connection);
 			if (fsObjId != -1) {
 				statement.setLong(2, fsObjId);
 			} else {
+				fsObjId = null;
 				statement.setNull(2, java.sql.Types.BIGINT);
 			}
 			statement.setString(3, fileName);
@@ -7814,31 +8070,36 @@ public DerivedFile addDerivedFile(String fileName, String localPath,
 
 			statement.setNull(15, java.sql.Types.VARCHAR); // MD5
 			statement.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-			statement.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-			statement.setNull(18, java.sql.Types.VARCHAR); // MIME type	
+			statement.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+			
+			statement.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+			statement.setNull(19, java.sql.Types.VARCHAR); // MIME type	
 
 			//parent path
-			statement.setString(19, parentPath);
+			statement.setString(20, parentPath);
 
 			// root data source object id
 			long dataSourceObjId = getDataSourceObjectId(connection, parentObj);
-			statement.setLong(20, dataSourceObjId);
+			statement.setLong(21, dataSourceObjId);
 			final String extension = extractExtension(fileName);
 			//extension
-			statement.setString(21, extension);
-
-			statement.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-			statement.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setString(22, extension);
 
+			statement.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+			statement.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+			statement.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+			
 			connection.executeUpdate(statement);
 
 			//add localPath
 			addFilePath(connection, newObjId, localPath, encodingType);
 
-			DerivedFile derivedFile = new DerivedFile(this, newObjId, dataSourceObjId, fileName, dirType, metaType, dirFlag, metaFlags,
-					savedSize, ctime, crtime, atime, mtime, null, null, null, parentPath, localPath, parentId, null, encodingType, extension, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
+			DerivedFile derivedFile = new DerivedFile(this, newObjId, dataSourceObjId, fsObjId, fileName, dirType, metaType, dirFlag, metaFlags,
+					savedSize, ctime, crtime, atime, mtime, null, null, null, null, parentPath, localPath, parentId, null, encodingType, extension, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT);
 
-			timelineManager.addEventsForNewFile(derivedFile, connection);
+			if (!timelineEventsDisabled.get()) {
+				timelineManager.addEventsForNewFile(derivedFile, connection);
+			}
 
 			//TODO add derived method to tsk_files_derived and tsk_files_derived_method
 			return derivedFile;
@@ -7886,7 +8147,7 @@ public DerivedFile updateDerivedFile(DerivedFile derivedFile, String localPath,
 		CaseDbTransaction trans = null;
 		try {
 			Content parentObj = derivedFile.getParent();
-			
+
 			trans = beginTransaction();
 			DerivedFile updatedFile = updateDerivedFile(derivedFile, localPath,
 					size, ctime, crtime, atime, mtime,
@@ -7901,18 +8162,18 @@ public DerivedFile updateDerivedFile(DerivedFile derivedFile, String localPath,
 			}
 			throw ex;
 		}
-	}		
-		
+	}
+
 	public DerivedFile updateDerivedFile(DerivedFile derivedFile, String localPath,
 			long size, long ctime, long crtime, long atime, long mtime,
 			boolean isFile, String mimeType,
 			String rederiveDetails, String toolName, String toolVersion,
-			String otherDetails, TskData.EncodingType encodingType, 
-			Content parentObj, CaseDbTransaction trans) throws TskCoreException {		
-		
+			String otherDetails, TskData.EncodingType encodingType,
+			Content parentObj, CaseDbTransaction trans) throws TskCoreException {
+
 		// Strip off any leading slashes from the local path (leading slashes indicate absolute paths)
 		localPath = localPath.replaceAll("^[/\\\\]+", "");
-		
+
 		ResultSet rs = null;
 		try {
 			final long parentId = parentObj.getId();
@@ -7962,9 +8223,11 @@ public DerivedFile updateDerivedFile(DerivedFile derivedFile, String localPath,
 			updateFilePath(trans.getConnection(), derivedFile.getId(), localPath, encodingType);
 
 			long dataSourceObjId = getDataSourceObjectId(trans.getConnection(), parentObj);
+			Long fileSystemObjId = derivedFile.getFileSystemObjectId().orElse(null);
 			final String extension = extractExtension(derivedFile.getName());
-			return new DerivedFile(this, derivedFile.getId(), dataSourceObjId, derivedFile.getName(), dirType, metaType, dirFlag, metaFlags,
-					savedSize, ctime, crtime, atime, mtime, null, null, null, parentPath, localPath, parentId, null, encodingType, extension, derivedFile.getOwnerUid().orElse(null), derivedFile.getOsAccountObjectId().orElse(null));
+			return new DerivedFile(this, derivedFile.getId(), dataSourceObjId, fileSystemObjId, derivedFile.getName(), dirType, metaType, dirFlag, metaFlags,
+					savedSize, ctime, crtime, atime, mtime, null, null, null, null, parentPath, localPath, parentId, null, encodingType, extension, 
+					derivedFile.getOwnerUid().orElse(null), derivedFile.getOsAccountObjectId().orElse(null));
 		} catch (SQLException ex) {
 			throw new TskCoreException("Failed to add derived file to case database", ex);
 		} finally {
@@ -8042,12 +8305,53 @@ public LocalFile addLocalFile(String fileName, String localPath,
 			boolean isFile, TskData.EncodingType encodingType,
 			Content parent, CaseDbTransaction transaction) throws TskCoreException {
 
-		return addLocalFile(fileName, localPath,
-				size, ctime, crtime, atime, mtime,
-				null, null, null,
-				isFile, encodingType,
-				parent, transaction);
+		return addLocalFile(fileName, localPath,
+				size, ctime, crtime, atime, mtime,
+				null, null, null,
+				isFile, encodingType,
+				parent, transaction);
+	}
+
+	/**
+	 * Adds a local/logical file to the case database. The database operations
+	 * are done within a caller-managed transaction; the caller is responsible
+	 * for committing or rolling back the transaction.
+	 *
+	 * @param fileName     The name of the file.
+	 * @param localPath    The absolute path (including the file name) of the
+	 *                     local/logical file in secondary storage.
+	 * @param size         The size of the file in bytes.
+	 * @param ctime        The changed time of the file.
+	 * @param crtime       The creation time of the file.
+	 * @param atime        The accessed time of the file.
+	 * @param mtime        The modified time of the file.
+	 * @param md5          The MD5 hash of the file.
+	 * @param sha256       The SHA-256 hash of the file.
+	 * @param known        The known status of the file (can be null)
+	 * @param mimeType     The MIME type of the file
+	 * @param isFile       True, unless the file is a directory.
+	 * @param encodingType Type of encoding used on the file
+	 * @param parent       The parent of the file (e.g., a virtual directory)
+	 * @param transaction  A caller-managed transaction within which the add
+	 *                     file operations are performed.
+	 *
+	 * @return An object representing the local/logical file.
+	 *
+	 * @throws TskCoreException if there is an error completing a case database
+	 *                          operation.
+	 */
+	public LocalFile addLocalFile(String fileName, String localPath,
+			long size, long ctime, long crtime, long atime, long mtime,
+			String md5, String sha256, FileKnown known, String mimeType,
+			boolean isFile, TskData.EncodingType encodingType,
+			Content parent, CaseDbTransaction transaction) throws TskCoreException {
+
+		return addLocalFile(fileName, localPath, size, ctime, crtime, atime, mtime,
+				md5, sha256, known, mimeType, isFile, encodingType,
+				OsAccount.NO_ACCOUNT, OsAccount.NO_OWNER_ID, parent, transaction);
+
 	}
+
 	/**
 	 * Adds a local/logical file to the case database. The database operations
 	 * are done within a caller-managed transaction; the caller is responsible
@@ -8067,6 +8371,8 @@ public LocalFile addLocalFile(String fileName, String localPath,
 	 * @param mimeType     The MIME type of the file
 	 * @param isFile       True, unless the file is a directory.
 	 * @param encodingType Type of encoding used on the file
+	 * @param osAccountId  OS account id (can be null)
+	 * @param ownerAccount Owner account (can be null)
 	 * @param parent       The parent of the file (e.g., a virtual directory)
 	 * @param transaction  A caller-managed transaction within which the add
 	 *                     file operations are performed.
@@ -8079,14 +8385,17 @@ public LocalFile addLocalFile(String fileName, String localPath,
 	public LocalFile addLocalFile(String fileName, String localPath,
 			long size, long ctime, long crtime, long atime, long mtime,
 			String md5, String sha256, FileKnown known, String mimeType,
-			boolean isFile, TskData.EncodingType encodingType,
+			boolean isFile, TskData.EncodingType encodingType, Long osAccountId, String ownerAccount,
 			Content parent, CaseDbTransaction transaction) throws TskCoreException {
 		
-		return addLocalFile(fileName, localPath, size, ctime, crtime, atime, mtime,
-				md5, sha256, known, mimeType, isFile, encodingType,
-				OsAccount.NO_ACCOUNT, OsAccount.NO_OWNER_ID, parent, transaction);
-		
+		return addLocalFile(fileName, localPath,
+			size, ctime, crtime, atime, mtime,
+			md5, sha256, null, known, mimeType,
+			isFile, encodingType, osAccountId, ownerAccount, 
+			parent, transaction);
 	}
+	
+	
 	/**
 	 * Adds a local/logical file to the case database. The database operations
 	 * are done within a caller-managed transaction; the caller is responsible
@@ -8102,6 +8411,7 @@ public LocalFile addLocalFile(String fileName, String localPath,
 	 * @param mtime        The modified time of the file.
 	 * @param md5          The MD5 hash of the file
 	 * @param sha256       the SHA-256 hash of the file.
+	 * @param sha1Hash     SHA-1 Hash of the file, may be null.
 	 * @param known        The known status of the file (can be null)
 	 * @param mimeType     The MIME type of the file
 	 * @param isFile       True, unless the file is a directory.
@@ -8109,6 +8419,7 @@ public LocalFile addLocalFile(String fileName, String localPath,
 	 * @param osAccountId  OS account id (can be null)
 	 * @param ownerAccount Owner account (can be null)
 	 * @param parent       The parent of the file (e.g., a virtual directory)
+	 *
 	 * @param transaction  A caller-managed transaction within which the add
 	 *                     file operations are performed.
 	 *
@@ -8119,9 +8430,9 @@ public LocalFile addLocalFile(String fileName, String localPath,
 	 */
 	public LocalFile addLocalFile(String fileName, String localPath,
 			long size, long ctime, long crtime, long atime, long mtime,
-			String md5, String sha256, FileKnown known, String mimeType,
+			String md5, String sha256, String sha1Hash, FileKnown known, String mimeType,
 			boolean isFile, TskData.EncodingType encodingType, Long osAccountId, String ownerAccount,
-			Content parent, CaseDbTransaction transaction) throws TskCoreException {
+			Content parent,  CaseDbTransaction transaction) throws TskCoreException {
 		CaseDbConnection connection = transaction.getConnection();
 		Statement queryStatement = null;
 		try {
@@ -8132,9 +8443,9 @@ public LocalFile addLocalFile(String fileName, String localPath,
 
 			// Insert a row for the local/logical file into the tsk_files table.
 			// INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path, dir_type, meta_type,
-			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, known, mime_type,
+			// dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type,
 			// parent_path, data_source_obj_id,extension, uid_str, os_account_obj_id)
-			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?)
+			// VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?, ?)
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_FILE);
 			statement.clearParameters();
 			statement.setLong(1, objectId);
@@ -8159,12 +8470,14 @@ public LocalFile addLocalFile(String fileName, String localPath,
 			statement.setLong(14, mtime);
 			statement.setString(15, md5);
 			statement.setString(16, sha256);
+			statement.setString(17, sha1Hash); // sha1
+			
 			if (known != null) {
-				statement.setByte(17, known.getFileKnownValue());
+				statement.setByte(18, known.getFileKnownValue());
 			} else {
-				statement.setByte(17, FileKnown.UNKNOWN.getFileKnownValue());
+				statement.setByte(18, FileKnown.UNKNOWN.getFileKnownValue());
 			}
-			statement.setString(18, mimeType);
+			statement.setString(19, mimeType);
 			String parentPath;
 			long dataSourceObjId;
 
@@ -8180,23 +8493,25 @@ public LocalFile addLocalFile(String fileName, String localPath,
 				parentPath = "/";
 				dataSourceObjId = getDataSourceObjectId(connection, parent);
 			}
-			statement.setString(19, parentPath);
-			statement.setLong(20, dataSourceObjId);
+			statement.setString(20, parentPath);
+			statement.setLong(21, dataSourceObjId);
 			final String extension = extractExtension(fileName);
-			statement.setString(21, extension);
+			statement.setString(22, extension);
 
 			if (ownerAccount != null) {
-				statement.setString(22, ownerAccount); // ownerUid
+				statement.setString(23, ownerAccount); // ownerUid
 			} else {
-				statement.setNull(22, java.sql.Types.VARCHAR);
+				statement.setNull(23, java.sql.Types.VARCHAR);
 			}
-			
+
 			if (osAccountId != null) {
-				statement.setLong(23, osAccountId); // osAccountObjId
+				statement.setLong(24, osAccountId); // osAccountObjId
 			} else {
-				statement.setNull(23, java.sql.Types.BIGINT);
+				statement.setNull(24, java.sql.Types.BIGINT);
 			}
-
+			
+			statement.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+			
 			connection.executeUpdate(statement);
 			addFilePath(connection, objectId, localPath, encodingType);
 			LocalFile localFile = new LocalFile(this,
@@ -8209,13 +8524,15 @@ public LocalFile addLocalFile(String fileName, String localPath,
 					metaFlags,
 					savedSize,
 					ctime, crtime, atime, mtime,
-					mimeType, md5, sha256, known,
+					mimeType, md5, sha256, sha1Hash, known,
 					parent.getId(), parentPath,
 					dataSourceObjId,
 					localPath,
 					encodingType, extension,
 					ownerAccount, osAccountId);
-			getTimelineManager().addEventsForNewFile(localFile, connection);
+			if (!timelineEventsDisabled.get()) {
+				getTimelineManager().addEventsForNewFile(localFile, connection);
+			}
 			return localFile;
 
 		} catch (SQLException ex) {
@@ -8346,7 +8663,7 @@ private boolean isRootDirectory(AbstractFile file, CaseDbTransaction transaction
 				isRootDirectoryCache.put(file.getId(), true);
 
 				return true; // The file has no parent
-				
+
 			}
 		} catch (SQLException ex) {
 			throw new TskCoreException(String.format("Failed to lookup parent of file (%s) with id %d", file.getName(), file.getId()), ex);
@@ -8419,15 +8736,18 @@ public LayoutFile addLayoutFile(String fileName,
 			prepStmt.setLong(1, newFileId); // obj_id
 
 			// If the parent is part of a file system, grab its file system ID
+			Long fileSystemObjectId;
 			if (0 != parent.getId()) {
-				long parentFs = this.getFileSystemId(parent.getId(), connection);
-				if (parentFs != -1) {
-					prepStmt.setLong(2, parentFs);
+				fileSystemObjectId = this.getFileSystemId(parent.getId(), connection);
+				if (fileSystemObjectId != -1) {
+					prepStmt.setLong(2, fileSystemObjectId);
 				} else {
 					prepStmt.setNull(2, java.sql.Types.BIGINT);
+					fileSystemObjectId = null;
 				}
 			} else {
 				prepStmt.setNull(2, java.sql.Types.BIGINT);
+				fileSystemObjectId = null;
 			}
 			prepStmt.setString(3, fileName); // name
 			prepStmt.setShort(4, TSK_DB_FILES_TYPE_ENUM.LAYOUT_FILE.getFileType()); // type
@@ -8445,16 +8765,19 @@ public LayoutFile addLayoutFile(String fileName,
 			prepStmt.setLong(14, mtime);  // mtime
 			prepStmt.setNull(15, java.sql.Types.VARCHAR); // MD5
 			prepStmt.setNull(16, java.sql.Types.VARCHAR); // SHA-256
-			prepStmt.setByte(17, FileKnown.UNKNOWN.getFileKnownValue()); // Known
-			prepStmt.setNull(18, java.sql.Types.VARCHAR); // MIME type	
-			prepStmt.setString(19, parentPath); // parent path
-			prepStmt.setLong(20, parent.getDataSource().getId()); // data_source_obj_id
-
-			prepStmt.setString(21, extractExtension(fileName)); 				//extension
+			prepStmt.setNull(17, java.sql.Types.VARCHAR); // SHA-1
+			
+			prepStmt.setByte(18, FileKnown.UNKNOWN.getFileKnownValue()); // Known
+			prepStmt.setNull(19, java.sql.Types.VARCHAR); // MIME type	
+			prepStmt.setString(20, parentPath); // parent path
+			prepStmt.setLong(21, parent.getDataSource().getId()); // data_source_obj_id
 
-			prepStmt.setString(22, OsAccount.NO_OWNER_ID); // ownerUid
-			prepStmt.setNull(23, java.sql.Types.BIGINT); // osAccountObjId
+			prepStmt.setString(22, extractExtension(fileName)); 				//extension
 
+			prepStmt.setString(23, OsAccount.NO_OWNER_ID); // ownerUid
+			prepStmt.setNull(24, java.sql.Types.BIGINT); // osAccountObjId
+			prepStmt.setLong(25, TskData.CollectedStatus.UNKNOWN.getType()); // collected
+			
 			connection.executeUpdate(prepStmt);
 
 			/*
@@ -8478,6 +8801,7 @@ public LayoutFile addLayoutFile(String fileName,
 			LayoutFile layoutFile = new LayoutFile(this,
 					newFileId,
 					parent.getDataSource().getId(),
+					fileSystemObjectId,
 					fileName,
 					TSK_DB_FILES_TYPE_ENUM.LAYOUT_FILE,
 					TSK_FS_NAME_TYPE_ENUM.REG,
@@ -8486,7 +8810,7 @@ public LayoutFile addLayoutFile(String fileName,
 					metaFlag.getValue(),
 					savedSize,
 					ctime, crtime, atime, mtime,
-					null, null,
+					null, null, null,
 					FileKnown.UNKNOWN,
 					parentPath,
 					null,
@@ -8512,22 +8836,22 @@ public LayoutFile addLayoutFile(String fileName,
 			}
 		}
 	}
-	
+
 	/**
-	 * Given a Content object, return its data source object ID.
-	 * For AbstractFiles, this simply returns the data source ID field.
-	 * 
+	 * Given a Content object, return its data source object ID. For
+	 * AbstractFiles, this simply returns the data source ID field.
+	 *
 	 * @param connection A case database connection.
 	 * @param content    The content to look up the data source object ID.
-	 * 
-	 * @return 
+	 *
+	 * @return A data source object id.
 	 */
 	private long getDataSourceObjectId(CaseDbConnection connection, Content content) throws TskCoreException {
 		if (content == null) {
 			throw new TskCoreException("Null Content parameter given");
 		}
 		if (content instanceof AbstractFile) {
-			return ((AbstractFile)content).getDataSourceObjectId();
+			return ((AbstractFile) content).getDataSourceObjectId();
 		} else {
 			return getDataSourceObjectId(connection, content.getId());
 		}
@@ -8631,29 +8955,29 @@ private void updateFilePath(CaseDbConnection connection, long objId, String path
 	public List<AbstractFile> findFilesInFolder(String fileName, AbstractFile parentFile) throws TskCoreException {
 		String ext = "";
 		if (!containsLikeWildcard(fileName)) {
-			ext = SleuthkitCase.extractExtension(fileName);	
+			ext = SleuthkitCase.extractExtension(fileName);
 		}
-		
+
 		CaseDbConnection connection = null;
 		ResultSet rs = null;
 		long parentId = parentFile.getId();
-		
+
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement;
 			if (ext.isEmpty()) {
 				statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILES_BY_PARENT_AND_NAME);
 				statement.clearParameters();
 				statement.setLong(1, parentId);
-				statement.setString(2, fileName);	
+				statement.setString(2, fileName);
 			} else {
 				statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_FILES_BY_EXTENSION_AND_PARENT_AND_NAME);
 				statement.clearParameters();
 				statement.setString(1, ext);
 				statement.setLong(2, parentId);
-				statement.setString(3, fileName);	
+				statement.setString(3, fileName);
 			}
 
 			rs = connection.executeQuery(statement);
@@ -8889,7 +9213,7 @@ public Image getImageById(long id) throws TskCoreException {
 		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
+			connection = connections.getConnection();
 			s = connection.createStatement();
 			rs = connection.executeQuery(s, "SELECT tsk_image_info.type, tsk_image_info.ssize, tsk_image_info.tzone, tsk_image_info.size, tsk_image_info.md5, tsk_image_info.sha1, tsk_image_info.sha256, tsk_image_info.display_name, data_source_info.device_id, tsk_image_names.name "
 					+ "FROM tsk_image_info "
@@ -8964,7 +9288,7 @@ VolumeSystem getVolumeSystemById(long id, Content parent) throws TskCoreExceptio
 		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
+			connection = connections.getConnection();
 			s = connection.createStatement();
 			rs = connection.executeQuery(s, "SELECT * FROM tsk_vs_info " //NON-NLS
 					+ "where obj_id = " + id); //NON-NLS
@@ -9134,7 +9458,7 @@ private FileSystem getFileSystemByIdHelper(long id, Content parent) throws TskCo
 		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
+			connection = connections.getConnection();
 			s = connection.createStatement();
 			rs = connection.executeQuery(s, "SELECT * FROM tsk_fs_info " //NON-NLS
 					+ "where obj_id = " + id); //NON-NLS
@@ -9179,7 +9503,7 @@ Volume getVolumeById(long id, VolumeSystem parent) throws TskCoreException {
 		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
+			connection = connections.getConnection();
 			s = connection.createStatement();
 			rs = connection.executeQuery(s, "SELECT * FROM tsk_vs_parts " //NON-NLS
 					+ "where obj_id = " + id); //NON-NLS
@@ -9246,7 +9570,7 @@ Directory getDirectoryById(long id, FileSystem parentFs) throws TskCoreException
 		ResultSet rs = null;
 		acquireSingleUserCaseReadLock();
 		try {
-			connection = connections.getConnection();	
+			connection = connections.getConnection();
 			s = connection.createStatement();
 			rs = connection.executeQuery(s, "SELECT * FROM tsk_files " //NON-NLS
 					+ "WHERE obj_id = " + id);
@@ -9287,7 +9611,7 @@ Directory getDirectoryById(long id, FileSystem parentFs) throws TskCoreException
 	public Collection<FileSystem> getImageFileSystems(Image image) throws TskCoreException {
 		List<FileSystem> fileSystems = new ArrayList<>();
 		String queryStr = "SELECT * FROM tsk_fs_info WHERE data_source_obj_id = " + image.getId();
-		
+
 		CaseDbConnection connection = null;
 		Statement s = null;
 		ResultSet rs = null;
@@ -9742,34 +10066,50 @@ public List<Image> getImages() throws TskCoreException {
 	 *                          within tsk core and the update fails
 	 */
 	public void setImagePaths(long obj_id, List<String> paths) throws TskCoreException {
-		CaseDbConnection connection = null;
-		acquireSingleUserCaseWriteLock();
-		PreparedStatement statement;
+		CaseDbTransaction transaction = beginTransaction();
 		try {
-			connection = connections.getConnection();
-			connection.beginTransaction();
-			statement = connection.getPreparedStatement(PREPARED_STATEMENT.DELETE_IMAGE_NAME);
+			setImagePaths(obj_id, paths, transaction);
+			transaction.commit();
+			transaction = null;
+		} finally {
+			if (transaction != null) {
+				transaction.rollback();
+			}
+		}
+	}
+	
+	/**
+	 * Set the file paths for the image given by objId.
+	 *
+	 * @param objId  the ID of the image to update
+	 * @param paths  the fully qualified paths to the files that make up the
+	 *               image
+	 * @param trans  The case database transaction.
+	 *
+	 * @throws TskCoreException exception thrown when critical error occurs
+	 *                          within tsk core and the update fails
+	 */
+	@Beta
+	public void setImagePaths(long objId, List<String> paths, CaseDbTransaction trans) throws TskCoreException {	
+		try {
+			PreparedStatement statement = trans.getConnection().getPreparedStatement(PREPARED_STATEMENT.DELETE_IMAGE_NAME);
 			statement.clearParameters();
-			statement.setLong(1, obj_id);
-			connection.executeUpdate(statement);
+			statement.setLong(1, objId);
+			trans.getConnection().executeUpdate(statement);
 			for (int i = 0; i < paths.size(); i++) {
-				statement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_IMAGE_NAME);
+				statement = trans.getConnection().getPreparedStatement(PREPARED_STATEMENT.INSERT_IMAGE_NAME);
 				statement.clearParameters();
-				statement.setLong(1, obj_id);
+				statement.setLong(1, objId);
 				statement.setString(2, paths.get(i));
 				statement.setLong(3, i);
-				connection.executeUpdate(statement);
+				trans.getConnection().executeUpdate(statement);
 			}
-			connection.commitTransaction();
 		} catch (SQLException ex) {
-			rollbackTransaction(connection);
 			throw new TskCoreException("Error updating image paths.", ex);
-		} finally {
-			closeConnection(connection);
-			releaseSingleUserCaseWriteLock();
-		}
+		} 
 	}
-
+	
+	
 	/**
 	 * Deletes a datasource from the open case, the database has foreign keys
 	 * with a delete cascade so that all the tables that have a datasource
@@ -9782,7 +10122,7 @@ public void setImagePaths(long obj_id, List<String> paths) throws TskCoreExcepti
 	 *                          within tsk core and the update fails
 	 */
 	void deleteDataSource(long dataSourceObjectId) throws TskCoreException {
-		
+
 		// Check if this data source is the only one associated with its host. If so,
 		// we will delete the host and other associated data.
 		// Note that the cascading deletes were only added in schema 9.1, so we
@@ -9791,13 +10131,13 @@ void deleteDataSource(long dataSourceObjectId) throws TskCoreException {
 		VersionNumber version = getDBSchemaCreationVersion();
 		int major = version.getMajor();
 		int minor = version.getMinor();
-		if(major > 9 || (major == 9 && minor >= 1)) {
+		if (major > 9 || (major == 9 && minor >= 1)) {
 			hostToDelete = getHostManager().getHostByDataSource(dataSourceObjectId);
 			if (getHostManager().getDataSourcesForHost(hostToDelete).size() != 1) {
 				hostToDelete = null;
 			}
 		}
-		
+
 		CaseDbConnection connection = null;
 		Statement statement;
 		acquireSingleUserCaseWriteLock();
@@ -9814,19 +10154,19 @@ void deleteDataSource(long dataSourceObjectId) throws TskCoreException {
 					+ "WHERE account_id NOT IN (SELECT account1_id FROM account_relationships) "
 					+ "AND account_id NOT IN (SELECT account2_id FROM account_relationships))";
 			statement.execute(accountSql);
-			
+
 			// Now delete any host that was only associated with this data source. This will cascade to delete
 			// realms, os accounts, and os account attributes that were associated with the host.
 			if (hostToDelete != null) {
 				statement.execute("DELETE FROM tsk_hosts WHERE id = " + hostToDelete.getHostId());
-				
+
 				// Clean up any stray OS Account objects
-				String deleteOsAcctObjectsQuery = "DELETE FROM tsk_objects " +
-					"WHERE type=" + TskData.ObjectType.OS_ACCOUNT.getObjectType() + " " + 
-					"AND obj_id NOT IN (SELECT os_account_obj_id FROM tsk_os_accounts WHERE  os_account_obj_id IS NOT NULL)";
+				String deleteOsAcctObjectsQuery = "DELETE FROM tsk_objects "
+						+ "WHERE type=" + TskData.ObjectType.OS_ACCOUNT.getObjectType() + " "
+						+ "AND obj_id NOT IN (SELECT os_account_obj_id FROM tsk_os_accounts WHERE  os_account_obj_id IS NOT NULL)";
 				statement.execute(deleteOsAcctObjectsQuery);
 			}
-			
+
 			connection.commitTransaction();
 		} catch (SQLException ex) {
 			rollbackTransaction(connection);
@@ -9901,13 +10241,15 @@ List<AbstractFile> resultSetToAbstractFiles(ResultSet rs, CaseDbConnection conne
 					LayoutFile lf = new LayoutFile(this,
 							rs.getLong("obj_id"), //NON-NLS
 							rs.getLong("data_source_obj_id"),
+							rs.getLong("fs_obj_id"),
 							rs.getString("name"), //NON-NLS
 							atype,
 							TSK_FS_NAME_TYPE_ENUM.valueOf(rs.getShort("dir_type")), TSK_FS_META_TYPE_ENUM.valueOf(rs.getShort("meta_type")), //NON-NLS
 							TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), rs.getShort("meta_flags"), //NON-NLS
 							rs.getLong("size"), //NON-NLS
 							rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
-							rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), parentPath,
+							rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"),
+							FileKnown.valueOf(rs.getByte("known")), parentPath,
 							rs.getString("mime_type"),
 							rs.getString("owner_uid"), osAccountObjId); //NON-NLS
 					results.add(lf);
@@ -9959,8 +10301,10 @@ org.sleuthkit.datamodel.File file(ResultSet rs, FileSystem fs) throws SQLExcepti
 				rs.getShort("meta_flags"), rs.getLong("size"), //NON-NLS
 				rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
 				(short) rs.getInt("mode"), rs.getInt("uid"), rs.getInt("gid"), //NON-NLS
-				rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), //NON-NLS
-				rs.getString("parent_path"), rs.getString("mime_type"), rs.getString("extension"), rs.getString("owner_uid"), osAccountObjId, Collections.emptyList()); //NON-NLS
+				rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), 
+				FileKnown.valueOf(rs.getByte("known")), //NON-NLS
+				rs.getString("parent_path"), rs.getString("mime_type"), rs.getString("extension"), rs.getString("owner_uid"), 
+				osAccountObjId, TskData.CollectedStatus.valueOf(rs.getInt("collected")), Collections.emptyList()); //NON-NLS
 		f.setFileSystem(fs);
 		return f;
 	}
@@ -9991,7 +10335,8 @@ Directory directory(ResultSet rs, FileSystem fs) throws SQLException {
 				rs.getShort("meta_flags"), rs.getLong("size"), //NON-NLS
 				rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
 				rs.getShort("mode"), rs.getInt("uid"), rs.getInt("gid"), //NON-NLS
-				rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), //NON-NLS
+				rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), 
+				FileKnown.valueOf(rs.getByte("known")), //NON-NLS
 				rs.getString("parent_path"), rs.getString("owner_uid"), osAccountObjId); //NON-NLS
 		dir.setFileSystem(fs);
 		return dir;
@@ -10049,16 +10394,18 @@ VirtualDirectory virtualDirectory(ResultSet rs, CaseDbConnection connection) thr
 					timeZone,
 					rs.getString("md5"),
 					rs.getString("sha256"),
+					rs.getString("sha1"),
 					FileKnown.valueOf(rs.getByte("known")),
 					parentPath);
 		} else {
 			final VirtualDirectory vd = new VirtualDirectory(this,
 					objId, dsObjId,
+					rs.getLong("fs_obj_id"),
 					rs.getString("name"), //NON-NLS
 					TSK_FS_NAME_TYPE_ENUM.valueOf(rs.getShort("dir_type")), //NON-NLS
 					TSK_FS_META_TYPE_ENUM.valueOf(rs.getShort("meta_type")), //NON-NLS
 					TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), //NON-NLS
-					rs.getShort("meta_flags"), rs.getString("md5"), rs.getString("sha256"), //NON-NLS
+					rs.getShort("meta_flags"), rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), //NON-NLS
 					FileKnown.valueOf(rs.getByte("known")), parentPath); //NON-NLS
 			return vd;
 		}
@@ -10083,7 +10430,7 @@ LocalDirectory localDirectory(ResultSet rs) throws SQLException {
 				TSK_FS_NAME_TYPE_ENUM.valueOf(rs.getShort("dir_type")), //NON-NLS
 				TSK_FS_META_TYPE_ENUM.valueOf(rs.getShort("meta_type")), //NON-NLS
 				TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), //NON-NLS
-				rs.getShort("meta_flags"), rs.getString("md5"), rs.getString("sha256"), //NON-NLS
+				rs.getShort("meta_flags"), rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), //NON-NLS
 				FileKnown.valueOf(rs.getByte("known")), parentPath); //NON-NLS
 		return ld;
 	}
@@ -10136,13 +10483,15 @@ private DerivedFile derivedFile(ResultSet rs, CaseDbConnection connection, long
 		}
 
 		final DerivedFile df = new DerivedFile(this, objId, rs.getLong("data_source_obj_id"),
+				rs.getLong("fs_obj_id"),
 				rs.getString("name"), //NON-NLS
 				TSK_FS_NAME_TYPE_ENUM.valueOf(rs.getShort("dir_type")), //NON-NLS
 				TSK_FS_META_TYPE_ENUM.valueOf(rs.getShort("meta_type")), //NON-NLS
 				TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), rs.getShort("meta_flags"), //NON-NLS
 				rs.getLong("size"), //NON-NLS
 				rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
-				rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), //NON-NLS
+				rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), 
+				FileKnown.valueOf(rs.getByte("known")), //NON-NLS
 				parentPath, localPath, parentId, rs.getString("mime_type"),
 				encodingType, rs.getString("extension"),
 				rs.getString("owner_uid"), osAccountObjId);
@@ -10201,7 +10550,8 @@ private LocalFile localFile(ResultSet rs, CaseDbConnection connection, long pare
 				TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), rs.getShort("meta_flags"), //NON-NLS
 				rs.getLong("size"), //NON-NLS
 				rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
-				rs.getString("mime_type"), rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), //NON-NLS
+				rs.getString("mime_type"), rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"), 
+				FileKnown.valueOf(rs.getByte("known")), //NON-NLS
 				parentId, parentPath, rs.getLong("data_source_obj_id"),
 				localPath, encodingType, rs.getString("extension"),
 				rs.getString("owner_uid"), osAccountObjId);
@@ -10234,7 +10584,8 @@ org.sleuthkit.datamodel.SlackFile slackFile(ResultSet rs, FileSystem fs) throws
 				rs.getShort("meta_flags"), rs.getLong("size"), //NON-NLS
 				rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"), //NON-NLS
 				(short) rs.getInt("mode"), rs.getInt("uid"), rs.getInt("gid"), //NON-NLS
-				rs.getString("md5"), rs.getString("sha256"), FileKnown.valueOf(rs.getByte("known")), //NON-NLS
+				rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"),
+				FileKnown.valueOf(rs.getByte("known")), //NON-NLS
 				rs.getString("parent_path"), rs.getString("mime_type"), rs.getString("extension"),
 				rs.getString("owner_uid"), osAccountObjId); //NON-NLS
 		f.setFileSystem(fs);
@@ -10295,13 +10646,14 @@ List<Content> fileChildren(ResultSet rs, CaseDbConnection connection, long paren
 							osAccountObjId = null;
 						}
 						final LayoutFile lf = new LayoutFile(this, rs.getLong("obj_id"),
-								rs.getLong("data_source_obj_id"), rs.getString("name"), type,
+								rs.getLong("data_source_obj_id"), rs.getLong("fs_obj_id"),
+								rs.getString("name"), type,
 								TSK_FS_NAME_TYPE_ENUM.valueOf(rs.getShort("dir_type")),
 								TSK_FS_META_TYPE_ENUM.valueOf(rs.getShort("meta_type")),
 								TSK_FS_NAME_FLAG_ENUM.valueOf(rs.getShort("dir_flags")), rs.getShort("meta_flags"),
 								rs.getLong("size"),
 								rs.getLong("ctime"), rs.getLong("crtime"), rs.getLong("atime"), rs.getLong("mtime"),
-								rs.getString("md5"), rs.getString("sha256"),
+								rs.getString("md5"), rs.getString("sha256"), rs.getString("sha1"),
 								FileKnown.valueOf(rs.getByte("known")), parentPath, rs.getString("mime_type"),
 								rs.getString("owner_uid"), osAccountObjId);
 						children.add(lf);
@@ -10329,41 +10681,6 @@ List<Content> fileChildren(ResultSet rs, CaseDbConnection connection, long paren
 		return children;
 	}
 
-	/**
-	 * Creates BlackboardArtifact objects for the result set of a
-	 * blackboard_artifacts table query of the form "SELECT * FROM
-	 * blackboard_artifacts WHERE XYZ".
-	 *
-	 * @param rs A result set from a query of the blackboard_artifacts table of
-	 *           the form "SELECT * FROM blackboard_artifacts WHERE XYZ".
-	 *
-	 * @return A list of BlackboardArtifact objects.
-	 *
-	 * @throws SQLException     Thrown if there is a problem iterating through
-	 *                          the result set.
-	 * @throws TskCoreException Thrown if there is an error looking up the
-	 *                          artifact type id
-	 */
-	private List<BlackboardArtifact> resultSetToArtifacts(ResultSet rs) throws SQLException, TskCoreException {
-		ArrayList<BlackboardArtifact> artifacts = new ArrayList<BlackboardArtifact>();
-		try {
-			while (rs.next()) {
-				BlackboardArtifact.Type artifactType = getArtifactType(rs.getInt("artifact_type_id"));
-				if (artifactType != null) {
-					artifacts.add(new BlackboardArtifact(this, rs.getLong("artifact_id"), rs.getLong("obj_id"), rs.getLong("artifact_obj_id"), rs.getLong("data_source_obj_id"),
-							rs.getInt("artifact_type_id"), artifactType.getTypeName(), artifactType.getDisplayName(),
-							BlackboardArtifact.ReviewStatus.withID(rs.getInt("review_status_id"))));
-				} else {
-					throw new TskCoreException("Error looking up artifact type ID " + rs.getInt("artifact_type_id") + " from artifact " + rs.getLong("artifact_id"));
-				}
-			} //end for each resultSet
-		} catch (SQLException e) {
-			logger.log(Level.SEVERE, "Error getting artifacts from result set", e); //NON-NLS
-		}
-
-		return artifacts;
-	}
-
 	/**
 	 * This method allows developers to run arbitrary SQL "SELECT" queries. The
 	 * CaseDbQuery object will take care of acquiring the necessary database
@@ -10436,6 +10753,7 @@ String getCaseHandleIdentifier() {
 		return caseHandleIdentifier;
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	protected void finalize() throws Throwable {
 		try {
@@ -10491,7 +10809,7 @@ public boolean setKnown(AbstractFile file, FileKnown fileKnown) throws TskCoreEx
 		}
 		acquireSingleUserCaseWriteLock();
 		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
+				Statement statement = connection.createStatement();) {
 			connection.executeUpdate(statement, "UPDATE tsk_files " //NON-NLS
 					+ "SET known='" + fileKnown.getFileKnownValue() + "' " //NON-NLS
 					+ "WHERE obj_id=" + id); //NON-NLS
@@ -10627,7 +10945,7 @@ public void setFileUnalloc(AbstractFile file) throws TskCoreException {
 
 		acquireSingleUserCaseWriteLock();
 		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
+				Statement statement = connection.createStatement();) {
 			connection.executeUpdate(statement, String.format("UPDATE tsk_files SET meta_flags = '%d', dir_flags = '%d'  WHERE obj_id = %d", newMetaFlgs, newDirFlags, file.getId()));
 
 			file.removeMetaFlag(TSK_FS_META_FLAG_ENUM.ALLOC);
@@ -10641,7 +10959,7 @@ public void setFileUnalloc(AbstractFile file) throws TskCoreException {
 			releaseSingleUserCaseWriteLock();
 		}
 	}
-
+	
 	/**
 	 * Store the md5Hash for the file in the database
 	 *
@@ -10717,7 +11035,7 @@ String getMd5ImageHash(Image img) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_IMAGE_MD5);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -10781,7 +11099,7 @@ String getSha1ImageHash(Image img) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_IMAGE_SHA1);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -10845,7 +11163,7 @@ String getSha256ImageHash(Image img) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_IMAGE_SHA256);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -10957,7 +11275,7 @@ String getAcquisitionDetails(DataSource datasource) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ACQUISITION_DETAILS);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -10993,7 +11311,7 @@ String getDataSourceInfoString(DataSource datasource, String columnName) throws
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ACQUISITION_TOOL_SETTINGS);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -11029,7 +11347,7 @@ Long getDataSourceInfoLong(DataSource datasource, String columnName) throws TskC
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_ACQUISITION_TOOL_SETTINGS);
 			statement.clearParameters();
 			statement.setLong(1, id);
@@ -11063,7 +11381,7 @@ public void setReviewStatus(BlackboardArtifact artifact, BlackboardArtifact.Revi
 		}
 		acquireSingleUserCaseWriteLock();
 		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
+				Statement statement = connection.createStatement();) {
 			connection.executeUpdate(statement, "UPDATE blackboard_artifacts "
 					+ " SET review_status_id=" + newStatus.getID()
 					+ " WHERE blackboard_artifacts.artifact_id = " + artifact.getArtifactID());
@@ -11136,7 +11454,7 @@ public List<AbstractFile> findFilesByMd5(String md5Hash) {
 		if (md5Hash == null) {
 			return Collections.<AbstractFile>emptyList();
 		}
-		
+
 		CaseDbConnection connection = null;
 		Statement s = null;
 		ResultSet rs = null;
@@ -11167,7 +11485,7 @@ public List<AbstractFile> findFilesByMd5(String md5Hash) {
 	 */
 	public boolean allFilesMd5Hashed() {
 		boolean allFilesAreHashed = false;
-		
+
 		CaseDbConnection connection = null;
 		Statement s = null;
 		ResultSet rs = null;
@@ -11198,9 +11516,9 @@ public boolean allFilesMd5Hashed() {
 	 *
 	 * @return the number of files with an MD5 hash
 	 */
-	public int countFilesMd5Hashed() {		
+	public int countFilesMd5Hashed() {
 		int count = 0;
-		
+
 		acquireSingleUserCaseReadLock();
 		CaseDbConnection connection = null;
 		Statement s = null;
@@ -11240,7 +11558,7 @@ public List<TagName> getAllTagNames() throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT * FROM tag_names
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_TAG_NAMES);
 			resultSet = connection.executeQuery(statement);
@@ -11276,7 +11594,7 @@ public List<TagName> getTagNamesInUse() throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT * FROM tag_names WHERE tag_name_id IN (SELECT tag_name_id from content_tags UNION SELECT tag_name_id FROM blackboard_artifact_tags)
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_TAG_NAMES_IN_USE);
 			resultSet = connection.executeQuery(statement);
@@ -11321,7 +11639,7 @@ public List<TagName> getTagNamesInUse(long dsObjId) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_TAG_NAMES_IN_USE_BY_DATASOURCE);
 			statement.setLong(1, dsObjId);
 			statement.setLong(2, dsObjId);
@@ -11351,10 +11669,11 @@ public List<TagName> getTagNamesInUse(long dsObjId) throws TskCoreException {
 	 * @return A TagName data transfer object (DTO) for the new row.
 	 *
 	 * @throws TskCoreException
-	 * @deprecated addOrUpdateTagName should be used this method calls
-	 * addOrUpdateTagName with a default knownStatus value
+	 * @deprecated TaggingManager.addOrUpdateTagName should be used instead with
+	 * the default knownStatus of TskData.FileKnown.UNKNOWN
 	 */
 	@Deprecated
+	@SuppressWarnings("deprecation")
 	public TagName addTagName(String displayName, String description, TagName.HTML_COLOR color) throws TskCoreException {
 		return addOrUpdateTagName(displayName, description, color, TskData.FileKnown.UNKNOWN);
 	}
@@ -11372,35 +11691,12 @@ public TagName addTagName(String displayName, String description, TagName.HTML_C
 	 * @return A TagName data transfer object (DTO) for the new row.
 	 *
 	 * @throws TskCoreException
+	 * @deprecated This method has been replaced by
+	 * TaggingManager.addOrUpdateTagName.
 	 */
+	@Deprecated
 	public TagName addOrUpdateTagName(String displayName, String description, TagName.HTML_COLOR color, TskData.FileKnown knownStatus) throws TskCoreException {
-		acquireSingleUserCaseWriteLock();
-		try (CaseDbConnection connection = connections.getConnection();) {
-			PreparedStatement statement;
-			// INSERT INTO tag_names (display_name, description, color, knownStatus) VALUES (?, ?, ?, ?) ON CONFLICT (display_name) DO UPDATE SET description = ?, color = ?, knownStatus = ?
-			statement = connection.getPreparedStatement(PREPARED_STATEMENT.INSERT_OR_UPDATE_TAG_NAME, Statement.RETURN_GENERATED_KEYS);
-			statement.clearParameters();
-			statement.setString(5, description);
-			statement.setString(6, color.getName());
-			statement.setByte(7, knownStatus.getFileKnownValue());
-			statement.setString(1, displayName);
-			statement.setString(2, description);
-			statement.setString(3, color.getName());
-			statement.setByte(4, knownStatus.getFileKnownValue());
-			connection.executeUpdate(statement);
-
-			statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_TAG_NAME_BY_NAME);
-			statement.clearParameters();
-			statement.setString(1, displayName);
-			try (ResultSet resultSet = connection.executeQuery(statement)) {
-				resultSet.next();
-				return new TagName(resultSet.getLong("tag_name_id"), displayName, description, color, knownStatus, resultSet.getLong("tag_set_id"), resultSet.getInt("rank"));
-			}
-		} catch (SQLException ex) {
-			throw new TskCoreException("Error adding row for " + displayName + " tag name to tag_names table", ex);
-		} finally {
-			releaseSingleUserCaseWriteLock();
-		}
+		return getTaggingManager().addOrUpdateTagName(displayName, description, color, knownStatus);
 	}
 
 	/**
@@ -11435,15 +11731,15 @@ public void deleteContentTag(ContentTag tag) throws TskCoreException {
 			statement.clearParameters();
 			statement.setLong(1, tag.getId());
 			trans.getConnection().executeUpdate(statement);
-			
+
 			// update the aggregate score for the content
 			Long contentId = tag.getContent() != null ? tag.getContent().getId() : null;
-			Long dataSourceId = tag.getContent() != null && tag.getContent().getDataSource() != null 
-					? tag.getContent().getDataSource().getId() 
+			Long dataSourceId = tag.getContent() != null && tag.getContent().getDataSource() != null
+					? tag.getContent().getDataSource().getId()
 					: null;
-			
+
 			this.getScoringManager().updateAggregateScoreAfterDeletion(contentId, dataSourceId, trans);
-			
+
 			trans.commit();
 			trans = null;
 		} catch (SQLException ex) {
@@ -11469,7 +11765,7 @@ public List<ContentTag> getAllContentTags() throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT content_tags.tag_id, content_tags.obj_id, content_tags.tag_name_id, content_tags.comment, content_tags.begin_byte_offset, content_tags.end_byte_offset, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name 
 			//	FROM content_tags 
 			//	INNER JOIN tag_names ON content_tags.tag_name_id = tag_names.tag_name_id 
@@ -11514,7 +11810,7 @@ public long getContentTagsCountByTagName(TagName tagName) throws TskCoreExceptio
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM content_tags WHERE tag_name_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_CONTENT_TAGS_BY_TAG_NAME);
 			statement.clearParameters();
@@ -11560,7 +11856,7 @@ public long getContentTagsCountByTagName(TagName tagName, long dsObjId) throws T
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// "SELECT COUNT(*) AS count FROM content_tags as content_tags, tsk_files as tsk_files WHERE content_tags.obj_id = tsk_files.obj_id"
 			//		+ " AND content_tags.tag_name_id = ? "
 			//		+ " AND tsk_files.data_source_obj_id = ? "
@@ -11602,7 +11898,7 @@ public ContentTag getContentTagByID(long contentTagID) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT content_tags.tag_id, content_tags.obj_id, content_tags.tag_name_id, content_tags.comment, content_tags.begin_byte_offset, content_tags.end_byte_offset, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name 
 			//	FROM content_tags 
 			//	INNER JOIN tag_names ON content_tags.tag_name_id = tag_names.tag_name_id 
@@ -11652,7 +11948,7 @@ public List<ContentTag> getContentTagsByTagName(TagName tagName) throws TskCoreE
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT content_tags.tag_id, content_tags.obj_id, content_tags.tag_name_id, content_tags.comment, content_tags.begin_byte_offset, content_tags.end_byte_offset, tsk_examiners.login_name 
 			//	FROM content_tags 
 			//  LEFT OUTER JOIN tsk_examiners ON content_tags.examiner_id = tsk_examiners.examiner_id 
@@ -11743,7 +12039,7 @@ public List<ContentTag> getContentTagsByContent(Content content) throws TskCoreE
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT content_tags.tag_id, content_tags.obj_id, content_tags.tag_name_id, content_tags.comment, content_tags.begin_byte_offset, content_tags.end_byte_offset, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name 
 			//	FROM content_tags 
 			//	INNER JOIN tag_names ON content_tags.tag_name_id = tag_names.tag_name_id 
@@ -11804,15 +12100,15 @@ public void deleteBlackboardArtifactTag(BlackboardArtifactTag tag) throws TskCor
 			statement.clearParameters();
 			statement.setLong(1, tag.getId());
 			trans.getConnection().executeUpdate(statement);
-			
+
 			// update the aggregate score for the artifact
 			Long artifactObjId = tag.getArtifact().getId();
-			Long dataSourceId = tag.getContent() != null && tag.getContent().getDataSource() != null 
-					? tag.getContent().getDataSource().getId() 
+			Long dataSourceId = tag.getContent() != null && tag.getContent().getDataSource() != null
+					? tag.getContent().getDataSource().getId()
 					: null;
-			
+
 			this.getScoringManager().updateAggregateScoreAfterDeletion(artifactObjId, dataSourceId, trans);
-			
+
 			trans.commit();
 			trans = null;
 		} catch (SQLException ex) {
@@ -11839,7 +12135,7 @@ public List<BlackboardArtifactTag> getAllBlackboardArtifactTags() throws TskCore
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT blackboard_artifact_tags.tag_id, blackboard_artifact_tags.artifact_id, blackboard_artifact_tags.tag_name_id, blackboard_artifact_tags.comment, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name
 			//	FROM blackboard_artifact_tags 
 			//	INNER JOIN tag_names ON blackboard_artifact_tags.tag_name_id = tag_names.tag_name_id 
@@ -11886,7 +12182,7 @@ public long getBlackboardArtifactTagsCountByTagName(TagName tagName) throws TskC
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT COUNT(*) AS count FROM blackboard_artifact_tags WHERE tag_name_id = ?
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.COUNT_ARTIFACTS_BY_TAG_NAME);
 			statement.clearParameters();
@@ -11931,7 +12227,7 @@ public long getBlackboardArtifactTagsCountByTagName(TagName tagName, long dsObjI
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// "SELECT COUNT(*) AS count FROM blackboard_artifact_tags as artifact_tags, blackboard_artifacts AS arts WHERE artifact_tags.artifact_id = arts.artifact_id"
 			//    + " AND artifact_tags.tag_name_id = ?"
 			//	 + " AND arts.data_source_obj_id =  ? "
@@ -11974,7 +12270,7 @@ public List<BlackboardArtifactTag> getBlackboardArtifactTagsByTagName(TagName ta
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT blackboard_artifact_tags.tag_id, blackboard_artifact_tags.artifact_id, blackboard_artifact_tags.tag_name_id, blackboard_artifact_tags.comment, tsk_examiners.login_name 
 			//	FROM blackboard_artifact_tags 
 			//	LEFT OUTER JOIN tsk_examiners ON blackboard_artifact_tags.examiner_id = tsk_examiners.examiner_id 
@@ -12026,7 +12322,7 @@ public List<BlackboardArtifactTag> getBlackboardArtifactTagsByTagName(TagName ta
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			//	SELECT artifact_tags.tag_id, artifact_tags.artifact_id, artifact_tags.tag_name_id, artifact_tags.comment, arts.obj_id, arts.artifact_obj_id, arts.data_source_obj_id, arts.artifact_type_id, arts.review_status_id, tsk_examiners.login_name 
 			//	 FROM blackboard_artifact_tags as artifact_tags, blackboard_artifacts AS arts 
 			//	 LEFT OUTER JOIN tsk_examiners ON artifact_tags.examiner_id = tsk_examiners.examiner_id 
@@ -12076,7 +12372,7 @@ public BlackboardArtifactTag getBlackboardArtifactTagByID(long artifactTagID) th
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			//SELECT blackboard_artifact_tags.tag_id, blackboard_artifact_tags.artifact_id, blackboard_artifact_tags.tag_name_id, blackboard_artifact_tags.comment, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name 
 			//	FROM blackboard_artifact_tags 
 			//	INNER JOIN tag_names ON blackboard_artifact_tags.tag_name_id = tag_names.tag_name_id  
@@ -12126,7 +12422,7 @@ public List<BlackboardArtifactTag> getBlackboardArtifactTagsByArtifact(Blackboar
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			//  SELECT blackboard_artifact_tags.tag_id, blackboard_artifact_tags.artifact_id, blackboard_artifact_tags.tag_name_id, blackboard_artifact_tags.comment, tag_names.display_name, tag_names.description, tag_names.color, tag_names.knownStatus, tsk_examiners.login_name 
 			//	FROM blackboard_artifact_tags 
 			//	INNER JOIN tag_names ON blackboard_artifact_tags.tag_name_id = tag_names.tag_name_id 
@@ -12293,7 +12589,7 @@ public List<Report> getAllReports() throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT * FROM reports
 			statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_REPORTS);
 			parentStatement = connection.createStatement();
@@ -12358,7 +12654,7 @@ public Report getReportById(long id) throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT * FROM reports WHERE obj_id = ?
 			statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_REPORT_BY_ID);
 			parentStatement = connection.createStatement();
@@ -12445,13 +12741,13 @@ static void closeStatement(Statement statement) {
 			}
 		}
 	}
-	
+
 	static void closeConnection(CaseDbConnection connection) {
 		if (connection != null) {
 			connection.close();
 		}
 	}
-	
+
 	private static void rollbackTransaction(CaseDbConnection connection) {
 		if (connection != null) {
 			connection.rollbackTransaction();
@@ -12481,7 +12777,7 @@ void setIngestJobEndDateTime(long ingestJobId, long endDateTime) throws TskCoreE
 	void setIngestJobStatus(long ingestJobId, IngestJobStatusType status) throws TskCoreException {
 		acquireSingleUserCaseWriteLock();
 		try (CaseDbConnection connection = connections.getConnection();
-			Statement statement = connection.createStatement();) {
+				Statement statement = connection.createStatement();) {
 			statement.executeUpdate("UPDATE ingest_jobs SET status_id=" + status.ordinal() + " WHERE ingest_job_id=" + ingestJobId + ";");
 		} catch (SQLException ex) {
 			throw new TskCoreException("Error ingest job status (ingest_job_id = " + ingestJobId + ".", ex);
@@ -12562,7 +12858,7 @@ public final IngestModuleInfo addIngestModule(String displayName, String factory
 		CaseDbConnection connection = null;
 		ResultSet resultSet = null;
 		Statement statement = null;
-		String uniqueName = factoryClassName + "-" + displayName + "-" + type.toString() + "-" + version;
+		String uniqueName = factoryClassName + "-" + displayName + "-" + version;
 		acquireSingleUserCaseWriteLock();
 		try {
 			connection = connections.getConnection();
@@ -12702,6 +12998,46 @@ String getInsertOrIgnoreSQL(String sql) {
 		}
 	}
 
+	/**
+	 * Returns a list of Blackboard artifacts whose values in dbColumn match the
+	 * list of values. The method will generate an SQL OR statement that can be
+	 * used as part of a where clause to retrieve artifacts for a set of values.
+	 *
+	 * For example getArtifactsForValues("artifacts.artifact_obj_id",
+	 * artifactObjIdList) will return a list of artifacts for the artifactObjID
+	 * values in the given list.
+	 *
+	 * When using this method be sure to use the tables as nicknamed in
+	 * DATA_ARTIFACT_QUERY_STRING and ANALYSIS_RESULT_QUERY_STRING;
+	 *
+	 * @param category The type of artifacts to return.
+	 * @param dbColumn The database column.
+	 * @param values   List of values.
+	 *
+	 * @return A list of BlackboardArtifacts
+	 *
+	 * @throws TskCoreException
+	 */
+	private List<? extends BlackboardArtifact> getArtifactsForValues(BlackboardArtifact.Category category, String dbColumn, List<? extends Number> values, CaseDbConnection connection) throws TskCoreException {
+		String where = "";
+		// This loop creates the OR statement with the following format:
+		// <dbColumn> = <value> OR <dbColumn> = <value2> OR ...  
+		for (Number value : values) {
+			if (!where.isEmpty()) {
+				where += " OR ";
+			}
+			where += dbColumn + " = " + value;
+		}
+
+		// Based on the category, pass the OR statement to the appropriate method
+		// that will retrieve the artifacts.
+		if (category == BlackboardArtifact.Category.DATA_ARTIFACT) {
+			return blackboard.getDataArtifactsWhere(where, connection);
+		} else {
+			return blackboard.getAnalysisResultsWhere(where, connection);
+		}
+	}
+
 	/**
 	 * Stores a pair of object ID and its type
 	 */
@@ -12815,10 +13151,10 @@ private enum PREPARED_STATEMENT {
 		SELECT_FILE_DERIVATION_METHOD("SELECT tool_name, tool_version, other FROM tsk_files_derived_method WHERE derived_id = ?"), //NON-NLS
 		SELECT_MAX_OBJECT_ID("SELECT MAX(obj_id) AS max_obj_id FROM tsk_objects"), //NON-NLS
 		INSERT_OBJECT("INSERT INTO tsk_objects (par_obj_id, type) VALUES (?, ?)"), //NON-NLS
-		INSERT_FILE("INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path, dir_type, meta_type, dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id  ) " //NON-NLS
-				+ "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"), //NON-NLS
-		INSERT_FILE_SYSTEM_FILE("INSERT INTO tsk_files(obj_id, fs_obj_id, data_source_obj_id, attr_type, attr_id, name, meta_addr, meta_seq, type, has_path, dir_type, meta_type, dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, mime_type, parent_path, extension, owner_uid, os_account_obj_id )"
-				+ " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"), // NON-NLS
+		INSERT_FILE("INSERT INTO tsk_files (obj_id, fs_obj_id, name, type, has_path, dir_type, meta_type, dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, sha1, known, mime_type, parent_path, data_source_obj_id, extension, owner_uid, os_account_obj_id, collected) " //NON-NLS
+				+ "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"), //NON-NLS
+		INSERT_FILE_SYSTEM_FILE("INSERT INTO tsk_files(obj_id, fs_obj_id, data_source_obj_id, attr_type, attr_id, name, meta_addr, meta_seq, type, has_path, dir_type, meta_type, dir_flags, meta_flags, size, ctime, crtime, atime, mtime, md5, sha256, sha1, mime_type, parent_path, extension, owner_uid, os_account_obj_id, collected)"
+				+ " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"), // NON-NLS
 		UPDATE_DERIVED_FILE("UPDATE tsk_files SET type = ?, dir_type = ?, meta_type = ?, dir_flags = ?,  meta_flags = ?, size= ?, ctime= ?, crtime= ?, atime= ?, mtime= ?, mime_type = ?  "
 				+ "WHERE obj_id = ?"), //NON-NLS
 		INSERT_LAYOUT_FILE("INSERT INTO tsk_file_layout (obj_id, byte_start, byte_len, sequence) " //NON-NLS
@@ -12922,7 +13258,6 @@ private enum PREPARED_STATEMENT {
 				+ "FROM tsk_objects INNER JOIN blackboard_artifacts " //NON-NLS
 				+ "ON tsk_objects.obj_id=blackboard_artifacts.obj_id " //NON-NLS
 				+ "WHERE (tsk_objects.par_obj_id = ?)"),
-		INSERT_OR_UPDATE_TAG_NAME("INSERT INTO tag_names (display_name, description, color, knownStatus) VALUES (?, ?, ?, ?) ON CONFLICT (display_name) DO UPDATE SET description = ?, color = ?, knownStatus = ?"),
 		SELECT_EXAMINER_BY_ID("SELECT * FROM tsk_examiners WHERE examiner_id = ?"),
 		SELECT_EXAMINER_BY_LOGIN_NAME("SELECT * FROM tsk_examiners WHERE login_name = ?"),
 		INSERT_EXAMINER_POSTGRESQL("INSERT INTO tsk_examiners (login_name) VALUES (?) ON CONFLICT DO NOTHING"),
@@ -12941,8 +13276,7 @@ private enum PREPARED_STATEMENT {
 		INSERT_POOL_INFO("INSERT INTO tsk_pool_info (obj_id, pool_type) VALUES (?, ?)"),
 		INSERT_FS_INFO("INSERT INTO tsk_fs_info (obj_id, data_source_obj_id, img_offset, fs_type, block_size, block_count, root_inum, first_inum, last_inum, display_name)"
 				+ "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"),
-		SELECT_TAG_NAME_BY_ID("SELECT * FROM tag_names where tag_name_id = ?"),
-		SELECT_TAG_NAME_BY_NAME("SELECT * FROM tag_names where display_name = ?");
+		SELECT_TAG_NAME_BY_ID("SELECT * FROM tag_names where tag_name_id = ?");
 
 		private final String sql;
 
@@ -13335,7 +13669,7 @@ PreparedStatement getPreparedStatement(PREPARED_STATEMENT statementKey, int gene
 		PreparedStatement getPreparedStatement(String sqlStatement, int generateKeys) throws SQLException {
 			PreparedStatement statement;
 			String statementKey = "SQL:" + sqlStatement + " Key:" + generateKeys;
-			if (adHocPreparedStatements.containsKey(statementKey)) {
+			if (adHocPreparedStatements.containsKey(statementKey) && !adHocPreparedStatements.get(statementKey).isClosed()) {
 				statement = this.adHocPreparedStatements.get(statementKey);
 			} else {
 				statement = prepareStatement(sqlStatement, generateKeys);
@@ -13580,9 +13914,26 @@ void executeCommand(DbCommand command) throws SQLException {
 	}
 
 	/**
-	 * Wraps the transactional capabilities of a CaseDbConnection object to
-	 * support use cases where control of a transaction is given to a
-	 * SleuthkitCase client. Note that this class does not implement the
+     * Allows callers to execute multiple database operations in a single
+     * transaction.  The usual motivations for this are for speed and
+     * atomicity. 
+     *
+     * WARNING: You need to be very careful when using this because it is
+     * easy to get the system into a deadlock when using a SQLite database. 
+     * For example, suppose you get this transaction, perform some inserts
+     * (and therefore hold a write lock on the DB), and then need to
+     * query the database from the same thread.  If your query does not use
+     * this transaction, it will get a new connection and will be
+     * blocked by the connection that is held by the transaction.
+     *
+     * If you are using CaseDbTransaction, you need to use only DB methods
+     * that also take in a transaction. We recommend that you preprocess
+     * as much as possible before getting the transaction to:
+     * - prevent deadlocks
+     * - hold on to the transaction for as little time as possible. 
+     *
+     * 
+	 * Note that this class does not implement the
 	 * Transaction interface because that sort of flexibility and its associated
 	 * complexity is not needed. Also, TskCoreExceptions are thrown to be
 	 * consistent with the outer SleuthkitCase class.
@@ -13598,18 +13949,24 @@ public static final class CaseDbTransaction {
 		private final CaseDbConnection connection;
 		private SleuthkitCase sleuthkitCase;
 
-		// A collection of object score changes that ocuured as part of this transaction.
-		// When the transaction is committed, events are fired to notify any listeners.
+        /* This class can store information about what was 
+         * inserted as part of the transaction so that we can
+         * fire events after the data has been persisted. */
+
 		// Score changes are stored as a map keyed by objId to prevent duplicates.
 		private Map<Long, ScoreChange> scoreChangeMap = new HashMap<>();
 		private List<Host> hostsAdded = new ArrayList<>();
+		private List<TimelineEventAddedEvent> timelineEvents = new ArrayList<>();
 		private List<OsAccount> accountsChanged = new ArrayList<>();
 		private List<OsAccount> accountsAdded = new ArrayList<>();
+		private List<TskEvent.MergedAccountsPair> accountsMerged = new ArrayList<>();
+
 		private List<Long> deletedOsAccountObjectIds = new ArrayList<>();
 		private List<Long> deletedResultObjectIds = new ArrayList<>();
 
-		private static Set<Long> threadsWithOpenTransaction = new HashSet<>();
-		private static final Object threadsWithOpenTransactionLock = new Object();
+    // Keep track of which threads have connections to debug deadlocks
+    private static Set<Long> threadsWithOpenTransaction = new HashSet<>();
+    private static final Object threadsWithOpenTransactionLock = new Object();
 
 		private CaseDbTransaction(SleuthkitCase sleuthkitCase) throws TskCoreException {
 			this.sleuthkitCase = sleuthkitCase;
@@ -13647,6 +14004,16 @@ CaseDbConnection getConnection() {
 		void registerScoreChange(ScoreChange scoreChange) {
 			scoreChangeMap.put(scoreChange.getObjectId(), scoreChange);
 		}
+		
+		/**
+		 * Register timeline event to be fired when transaction finishes.
+		 * @param timelineEvent The timeline event.
+		 */
+		void registerTimelineEvent(TimelineEventAddedEvent timelineEvent) {
+			if (timelineEvent != null) {
+				timelineEvents.add(timelineEvent);
+			}
+		}
 
 		/**
 		 * Saves a host that has been added as a part of this transaction.
@@ -13690,6 +14057,16 @@ void registerAddedOsAccount(OsAccount account) {
 			}
 		}
 
+		/**
+		 * Saves an account that has been merged as part of this transaction.
+		 *
+		 * @param sourceOsAccountObjId
+		 * @param destinationOsAccountObjId
+		 */
+		void registerMergedOsAccount(long sourceOsAccountObjId, long destinationOsAccountObjId) {
+			accountsMerged.add(new TskEvent.MergedAccountsPair(sourceOsAccountObjId, destinationOsAccountObjId));
+		}
+
 		/**
 		 * Saves an analysis result that has been deleted as a part of this
 		 * transaction.
@@ -13735,6 +14112,11 @@ public void commit() throws TskCoreException {
 						sleuthkitCase.fireTSKEvent(new TskEvent.AggregateScoresChangedEvent(entry.getKey(), ImmutableSet.copyOf(entry.getValue())));
 					}
 				}
+				if (!timelineEvents.isEmpty()) {
+					for (TimelineEventAddedEvent evt : timelineEvents) {
+						sleuthkitCase.fireTSKEvent(evt);
+					}
+				}
 				if (!hostsAdded.isEmpty()) {
 					sleuthkitCase.fireTSKEvent(new TskEvent.HostsAddedTskEvent(hostsAdded));
 				}
@@ -13744,6 +14126,9 @@ public void commit() throws TskCoreException {
 				if (!accountsChanged.isEmpty()) {
 					sleuthkitCase.fireTSKEvent(new TskEvent.OsAccountsUpdatedTskEvent(accountsChanged));
 				}
+				if (!accountsMerged.isEmpty()) {
+					sleuthkitCase.fireTSKEvent(new TskEvent.OsAccountsMergedTskEvent(accountsMerged));
+				}
 				if (!deletedOsAccountObjectIds.isEmpty()) {
 					sleuthkitCase.fireTSKEvent(new TskEvent.OsAccountsDeletedTskEvent(deletedOsAccountObjectIds));
 				}
@@ -13806,7 +14191,7 @@ private CaseDbQuery(String query, boolean allowWriteQuery) throws TskCoreExcepti
 					throw new TskCoreException("Unsupported query: Only SELECT queries are supported.");
 				}
 			}
-			
+
 			SleuthkitCase.this.acquireSingleUserCaseReadLock();
 			try {
 				connection = connections.getConnection();
@@ -13977,7 +14362,7 @@ public long getLastObjectId() throws TskCoreException {
 		acquireSingleUserCaseReadLock();
 		try {
 			connection = connections.getConnection();
-			
+
 			// SELECT MAX(obj_id) AS max_obj_id FROM tsk_objects
 			PreparedStatement statement = connection.getPreparedStatement(PREPARED_STATEMENT.SELECT_MAX_OBJECT_ID);
 			rs = connection.executeQuery(statement);
@@ -14098,7 +14483,7 @@ public ArrayList<BlackboardArtifact.ARTIFACT_TYPE> getBlackboardArtifactTypes()
 	 *
 	 * @throws TskCoreException If there is an error adding the type to the case
 	 *                          database.
-	 * @deprecated Use SleuthkitCase.addBlackboardArtifactType instead.
+	 * @deprecated Use SleuthkitCase.addBlackboardArtifactType() instead.
 	 */
 	@Deprecated
 	public int addArtifactType(String artifactTypeName, String displayName) throws TskCoreException {
@@ -14120,7 +14505,7 @@ public int addArtifactType(String artifactTypeName, String displayName) throws T
 	 *
 	 * @throws TskCoreException If there is an error adding the type to the case
 	 *                          database.
-	 * @deprecated Use SleuthkitCase.addArtifactAttributeType instead.
+	 * @deprecated Use SleuthkitCase.addArtifactAttributeType() instead.
 	 */
 	@Deprecated
 	public int addAttrType(String attrTypeString, String displayName) throws TskCoreException {
@@ -14139,7 +14524,7 @@ public int addAttrType(String attrTypeString, String displayName) throws TskCore
 	 * @return An attribute id or -1 if the attribute type does not exist.
 	 *
 	 * @throws TskCoreException If an error occurs accessing the case database.
-	 * @deprecated Use SleuthkitCase.getAttributeType instead.
+	 * @deprecated Use SleuthkitCase.getAttributeType() instead.
 	 */
 	@Deprecated
 	public int getAttrTypeID(String attrTypeName) throws TskCoreException {
@@ -14550,7 +14935,7 @@ public Collection<FileSystem> getFileSystems(Image image) {
 			return new ArrayList<>();
 		}
 	}
-	
+
 	/**
 	 * Find all files in the data source, by name and parent
 	 *
@@ -14565,7 +14950,7 @@ public Collection<FileSystem> getFileSystems(Image image) {
 	 *         parentFile.
 	 *
 	 * @throws org.sleuthkit.datamodel.TskCoreException
-	 * 
+	 *
 	 * @deprecated Use findFilesInFolder()
 	 */
 	@Deprecated
diff --git a/bindings/java/src/org/sleuthkit/datamodel/SpecialDirectory.java b/bindings/java/src/org/sleuthkit/datamodel/SpecialDirectory.java
index 031b8ace7600e4b68659d0b6d04904b1c05b7a06..84bbe68642eea09afbdd3f79b08a7d2e0f56c2c0 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/SpecialDirectory.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/SpecialDirectory.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  * 
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  * 
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -33,6 +33,7 @@ public abstract class SpecialDirectory extends AbstractFile {
 	SpecialDirectory(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
+			Long fileSystemObjectId,
 			TskData.TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
 			String name,
 			TskData.TSK_DB_FILES_TYPE_ENUM fileType,
@@ -43,12 +44,13 @@ public abstract class SpecialDirectory extends AbstractFile {
 			long ctime, long crtime, long atime, long mtime,
 			short modes,
 			int uid, int gid,
-			String md5Hash, String sha256Hash, FileKnown knownState,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
 			String parentPath,
 			String mimeType) {
-		super(db, objId, dataSourceObjectId, attrType, attrId, name,
+		super(db, objId, dataSourceObjectId, fileSystemObjectId, attrType, attrId, name,
 				fileType, metaAddr, metaSeq, dirType, metaType, dirFlag,
-				metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, Collections.emptyList());
+				metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
 	}
 
 	/**
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TagSet.java b/bindings/java/src/org/sleuthkit/datamodel/TagSet.java
index 31e86808e21e04a915bb1ecb40da58b7c175469b..8c8dec9ca15b8ffeee706db08587c42b34a8f462 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/TagSet.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TagSet.java
@@ -72,7 +72,7 @@ public List<TagName> getTagNames() {
 	 *
 	 * @return TagSet id value.
 	 */
-	long getId() {
+	public long getId() {
 		return id;
 	}
 
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TaggingManager.java b/bindings/java/src/org/sleuthkit/datamodel/TaggingManager.java
index df1d09bc61ccd88ac8908efa6972d2f05446cb8a..58cf8d2001274dfff169c73bcc6fa4268d1e4f47 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/TaggingManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TaggingManager.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +18,7 @@
  */
 package org.sleuthkit.datamodel;
 
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -28,6 +29,11 @@
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
 import static org.sleuthkit.datamodel.TskData.DbType.POSTGRESQL;
+import org.sleuthkit.datamodel.TskEvent.TagNamesAddedTskEvent;
+import org.sleuthkit.datamodel.TskEvent.TagNamesDeletedTskEvent;
+import org.sleuthkit.datamodel.TskEvent.TagNamesUpdatedTskEvent;
+import org.sleuthkit.datamodel.TskEvent.TagSetsAddedTskEvent;
+import org.sleuthkit.datamodel.TskEvent.TagSetsDeletedTskEvent;
 
 /**
  * Provides an API to manage Tags.
@@ -54,10 +60,10 @@ public class TaggingManager {
 	 */
 	public List<TagSet> getTagSets() throws TskCoreException {
 		List<TagSet> tagSetList = new ArrayList<>();
-		
+
 		skCase.acquireSingleUserCaseReadLock();
 		String getAllTagSetsQuery = "SELECT * FROM tsk_tag_sets";
-		try (CaseDbConnection connection = skCase.getConnection();Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(getAllTagSetsQuery);) {
+		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(getAllTagSetsQuery);) {
 			while (resultSet.next()) {
 				int setID = resultSet.getInt("tag_set_id");
 				String setName = resultSet.getString("name");
@@ -121,6 +127,8 @@ public TagSet addTagSet(String name, List<TagName> tagNames) throws TskCoreExcep
 					}
 				}
 				tagSet = new TagSet(setID, name, updatedTags);
+				skCase.fireTSKEvent(new TagSetsAddedTskEvent(Collections.singletonList(tagSet)));
+				skCase.fireTSKEvent(new TagNamesUpdatedTskEvent(updatedTags));
 			}
 			trans.commit();
 		} catch (SQLException ex) {
@@ -158,6 +166,14 @@ public void deleteTagSet(TagSet tagSet) throws TskCoreException {
 			queryTemplate = "DELETE FROM tsk_tag_sets WHERE tag_set_id = '%d'";
 			stmt.execute(String.format(queryTemplate, tagSet.getId()));
 			trans.commit();
+
+			List<Long> tagNameIds = new ArrayList<>();
+			for (TagName tagName : tagSet.getTagNames()) {
+				tagNameIds.add(tagName.getId());
+			}
+
+			skCase.fireTSKEvent(new TagSetsDeletedTskEvent(Collections.singletonList(tagSet.getId())));
+			skCase.fireTSKEvent(new TagNamesDeletedTskEvent(tagNameIds));
 		} catch (SQLException ex) {
 			trans.rollback();
 			throw new TskCoreException(String.format("Error deleting tag set where id = %d.", tagSet.getId()), ex);
@@ -177,15 +193,15 @@ public TagSet getTagSet(TagName tagName) throws TskCoreException {
 		if (tagName == null) {
 			throw new IllegalArgumentException("Null tagName argument");
 		}
-		
+
 		if (tagName.getTagSetId() <= 0) {
 			return null;
 		}
-		
+
 		skCase.acquireSingleUserCaseReadLock();
 		TagSet tagSet = null;
 		String sqlQuery = String.format("SELECT * FROM tsk_tag_sets WHERE tag_set_id = %d", tagName.getTagSetId());
-		try (CaseDbConnection connection = skCase.getConnection();Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(sqlQuery);) {
+		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(sqlQuery);) {
 			if (resultSet.next()) {
 				int setID = resultSet.getInt("tag_set_id");
 				String setName = resultSet.getString("name");
@@ -199,6 +215,39 @@ public TagSet getTagSet(TagName tagName) throws TskCoreException {
 		}
 	}
 
+	/**
+	 * Return a TagSet object for the given id.
+	 *
+	 * @param id TagSet id.
+	 *
+	 * @return The TagSet represented by the given id, or null if one was not
+	 *         found.
+	 *
+	 * @throws TskCoreException
+	 */
+	public TagSet getTagSet(long id) throws TskCoreException {
+		TagSet tagSet = null;
+		String preparedQuery = "Select * FROM tsk_tag_sets WHERE tag_set_id = ?";
+		skCase.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = skCase.getConnection(); PreparedStatement statement = connection.getPreparedStatement(preparedQuery, Statement.NO_GENERATED_KEYS)) {
+			statement.setLong(1, id);
+			try (ResultSet resultSet = statement.executeQuery()) {
+				if (resultSet.next()) {
+					int setID = resultSet.getInt("tag_set_id");
+					String setName = resultSet.getString("name");
+					tagSet = new TagSet(setID, setName, getTagNamesByTagSetID(setID));
+				}
+			}
+
+		} catch (SQLException ex) {
+			throw new TskCoreException(String.format("Error occurred getting TagSet (ID=%d)", id), ex);
+		} finally {
+			skCase.releaseSingleUserCaseReadLock();
+		}
+
+		return tagSet;
+	}
+
 	/**
 	 * Inserts a row into the blackboard_artifact_tags table in the case
 	 * database.
@@ -219,7 +268,7 @@ public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, T
 
 		List<BlackboardArtifactTag> removedTags = new ArrayList<>();
 		List<String> removedTagIds = new ArrayList<>();
-		CaseDbTransaction trans = skCase.beginTransaction();
+		CaseDbTransaction trans = null;
 		try {
 			// If a TagName is part of a TagSet remove any existing tags from the
 			// set that are currenctly on the artifact
@@ -228,10 +277,10 @@ public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, T
 				// Get the list of all of the blackboardArtifactTags that use
 				// TagName for the given artifact.
 				String selectQuery = String.format("SELECT * from blackboard_artifact_tags JOIN tag_names ON tag_names.tag_name_id = blackboard_artifact_tags.tag_name_id JOIN tsk_examiners on tsk_examiners.examiner_id = blackboard_artifact_tags.examiner_id WHERE artifact_id = %d AND tag_names.tag_set_id = %d", artifact.getArtifactID(), tagSetId);
-
-				try (Statement stmt = trans.getConnection().createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
+				TagName removedTag;
+				try (Statement stmt = skCase.getConnection().createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
 					while (resultSet.next()) {
-						TagName removedTag = new TagName(
+						removedTag = new TagName(
 								resultSet.getLong("tag_name_id"),
 								resultSet.getString("display_name"),
 								resultSet.getString("description"),
@@ -254,19 +303,26 @@ public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, T
 					}
 				}
 
-				if (!removedTags.isEmpty()) {
-					// Remove the tags.
-					String removeQuery = String.format("DELETE FROM blackboard_artifact_tags WHERE tag_id IN (%s)", String.join(",", removedTagIds));
-					try (Statement stmt = trans.getConnection().createStatement()) {
-						stmt.executeUpdate(removeQuery);
-					}
+			}
+
+			Content content = skCase.getContentById(artifact.getObjectID());
+			Examiner currentExaminer = skCase.getCurrentExaminer();
+
+			trans = skCase.beginTransaction();
+			CaseDbConnection connection = trans.getConnection();
+
+			if (!removedTags.isEmpty()) {
+				// Remove the tags.
+				String removeQuery = String.format("DELETE FROM blackboard_artifact_tags WHERE tag_id IN (%s)", String.join(",", removedTagIds));
+				try (Statement stmt = connection.createStatement()) {
+					stmt.executeUpdate(removeQuery);
 				}
 			}
 
 			// Add the new Tag.
 			BlackboardArtifactTag artifactTag;
-			try (Statement stmt = trans.getConnection().createStatement()) {
-				Examiner currentExaminer = skCase.getCurrentExaminer();
+			try (Statement stmt = connection.createStatement()) {
+
 				String query = String.format(
 						"INSERT INTO blackboard_artifact_tags (artifact_id, tag_name_id, comment, examiner_id) VALUES (%d, %d, '%s', %d)",
 						artifact.getArtifactID(),
@@ -283,10 +339,10 @@ public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, T
 				try (ResultSet resultSet = stmt.getGeneratedKeys()) {
 					resultSet.next();
 					artifactTag = new BlackboardArtifactTag(resultSet.getLong(1), //last_insert_rowid()
-							artifact, skCase.getContentById(artifact.getObjectID()), tagName, comment, currentExaminer.getLoginName());
+							artifact, content, tagName, comment, currentExaminer.getLoginName());
 				}
 			}
-			
+
 			skCase.getScoringManager().updateAggregateScoreAfterAddition(
 					artifact.getId(), artifact.getDataSourceObjectID(), getTagScore(tagName.getKnownStatus()), trans);
 
@@ -294,33 +350,47 @@ public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, T
 
 			return new BlackboardArtifactTagChange(artifactTag, removedTags);
 		} catch (SQLException ex) {
-			trans.rollback();
+			if (trans != null) {
+				trans.rollback();
+			}
 			throw new TskCoreException("Error adding row to blackboard_artifact_tags table (obj_id = " + artifact.getArtifactID() + ", tag_name_id = " + tagName.getId() + ")", ex);
 		}
 	}
-	
 
 	/**
-	 * Returns the score based on this TagName object.
-	 * @param knownStatus The known status of the tag.
-	 * @return The relevant score.
+	 * Translates the known status of a tag definition into an item score. This
+	 * supports scoring of tagged items.
+	 *
+	 * @param knownStatus The known status of a tag definition.
+	 *
+	 * @return The corresponding item score.
 	 */
 	static Score getTagScore(TskData.FileKnown knownStatus) {
 		switch (knownStatus) {
-			case BAD: 
+			case BAD:
+				/*
+				 * The "bad" known status is used to define tags that are
+				 * "notable." An item tagged with a "notable" tag is scored as
+				 * notable.
+				 */
 				return Score.SCORE_NOTABLE;
-			case UNKNOWN: 
+			case UNKNOWN:
 			case KNOWN:
-			default:
+			default: // N/A
+				/*
+				 * All other known status values have no special significance in
+				 * a tag definition. However, if an item has been tagged at all
+				 * by a user, the item is scored as likely notable.
+				 */
 				return Score.SCORE_LIKELY_NOTABLE;
 		}
 	}
-	
-		/**
+
+	/**
 	 * Retrieves the maximum FileKnown status of any tag associated with the
 	 * object id.
 	 *
-	 * @param objectId   The object id of the item.
+	 * @param objectId    The object id of the item.
 	 * @param transaction The case db transaction to perform this query.
 	 *
 	 * @return The maximum FileKnown status for this object or empty.
@@ -372,14 +442,17 @@ Optional<TskData.FileKnown> getMaxTagKnownStatus(long objectId, CaseDbTransactio
 	public ContentTagChange addContentTag(Content content, TagName tagName, String comment, long beginByteOffset, long endByteOffset) throws TskCoreException {
 		List<ContentTag> removedTags = new ArrayList<>();
 		List<String> removedTagIds = new ArrayList<>();
+		Examiner currentExaminer = skCase.getCurrentExaminer();
 		CaseDbTransaction trans = skCase.beginTransaction();
+		CaseDbConnection connection = trans.getConnection();
+
 		try {
 			long tagSetId = tagName.getTagSetId();
 
 			if (tagSetId > 0) {
 				String selectQuery = String.format("SELECT * from content_tags JOIN tag_names ON tag_names.tag_name_id = content_tags.tag_name_id JOIN tsk_examiners on tsk_examiners.examiner_id = content_tags.examiner_id WHERE obj_id = %d AND tag_names.tag_set_id = %d", content.getId(), tagSetId);
 
-				try (Statement stmt = trans.getConnection().createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
+				try (Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
 					while (resultSet.next()) {
 						TagName removedTag = new TagName(
 								resultSet.getLong("tag_name_id"),
@@ -407,7 +480,7 @@ public ContentTagChange addContentTag(Content content, TagName tagName, String c
 
 				if (!removedTags.isEmpty()) {
 					String removeQuery = String.format("DELETE FROM content_tags WHERE tag_id IN (%s)", String.join(",", removedTagIds));
-					try (Statement stmt = trans.getConnection().createStatement()) {
+					try (Statement stmt = connection.createStatement()) {
 						stmt.executeUpdate(removeQuery);
 					}
 				}
@@ -415,8 +488,8 @@ public ContentTagChange addContentTag(Content content, TagName tagName, String c
 
 			String queryTemplate = "INSERT INTO content_tags (obj_id, tag_name_id, comment, begin_byte_offset, end_byte_offset, examiner_id) VALUES (%d, %d, '%s', %d, %d, %d)";
 			ContentTag contentTag = null;
-			try (Statement stmt = trans.getConnection().createStatement()) {
-				Examiner currentExaminer = skCase.getCurrentExaminer();
+			try (Statement stmt = connection.createStatement()) {
+
 				String query = String.format(queryTemplate,
 						content.getId(),
 						tagName.getId(),
@@ -437,7 +510,7 @@ public ContentTagChange addContentTag(Content content, TagName tagName, String c
 							content, tagName, comment, beginByteOffset, endByteOffset, currentExaminer.getLoginName());
 				}
 			}
-			
+
 			Long dataSourceId = content.getDataSource() != null ? content.getDataSource().getId() : null;
 			skCase.getScoringManager().updateAggregateScoreAfterAddition(
 					content.getId(), dataSourceId, getTagScore(tagName.getKnownStatus()), trans);
@@ -450,6 +523,104 @@ public ContentTagChange addContentTag(Content content, TagName tagName, String c
 		}
 	}
 
+	/**
+	 * Inserts row into the tags_names table, or updates the existing row if the
+	 * displayName already exists in the tag_names table in the case database.
+	 *
+	 * @param displayName The display name for the new tag name.
+	 * @param description The description for the new tag name.
+	 * @param color       The HTML color to associate with the new tag name.
+	 * @param knownStatus The TskData.FileKnown value to associate with the new
+	 *                    tag name.
+	 *
+	 * @return A TagName data transfer object (DTO) for the new row.
+	 *
+	 * @throws TskCoreException
+	 */
+	public TagName addOrUpdateTagName(String displayName, String description, TagName.HTML_COLOR color, TskData.FileKnown knownStatus) throws TskCoreException {
+		String insertQuery = "INSERT INTO tag_names (display_name, description, color, knownStatus) VALUES (?, ?, ?, ?) ON CONFLICT (display_name) DO UPDATE SET description = ?, color = ?, knownStatus = ?";
+		boolean isUpdated = false;
+		skCase.acquireSingleUserCaseWriteLock();
+		try (CaseDbConnection connection = skCase.getConnection()) {
+			try (PreparedStatement statement = connection.getPreparedStatement("SELECT * FROM tag_names WHERE display_name = ?", Statement.NO_GENERATED_KEYS)) {
+				statement.setString(1, displayName);
+				try (ResultSet resultSet = statement.executeQuery()) {
+					isUpdated = resultSet.next();
+				}
+			}
+
+			try (PreparedStatement statement = connection.getPreparedStatement(insertQuery, Statement.RETURN_GENERATED_KEYS);) {
+				statement.clearParameters();
+				statement.setString(5, description);
+				statement.setString(6, color.getName());
+				statement.setByte(7, knownStatus.getFileKnownValue());
+				statement.setString(1, displayName);
+				statement.setString(2, description);
+				statement.setString(3, color.getName());
+				statement.setByte(4, knownStatus.getFileKnownValue());
+				statement.executeUpdate();
+			}
+
+			try (PreparedStatement statement = connection.getPreparedStatement("SELECT * FROM tag_names where display_name = ?", Statement.NO_GENERATED_KEYS)) {
+				statement.setString(1, displayName);
+				try (ResultSet resultSet = connection.executeQuery(statement)) {
+					resultSet.next();
+					TagName newTag = new TagName(resultSet.getLong("tag_name_id"), displayName, description, color, knownStatus, resultSet.getLong("tag_set_id"), resultSet.getInt("rank"));
+
+					if (!isUpdated) {
+						skCase.fireTSKEvent(new TagNamesAddedTskEvent(Collections.singletonList(newTag)));
+					} else {
+						skCase.fireTSKEvent(new TagNamesUpdatedTskEvent(Collections.singletonList(newTag)));
+					}
+
+					return newTag;
+				}
+			}
+		} catch (SQLException ex) {
+			throw new TskCoreException("Error adding row for " + displayName + " tag name to tag_names table", ex);
+		} finally {
+			skCase.releaseSingleUserCaseWriteLock();
+		}
+	}
+
+	/**
+	 * Return the TagName object for the given id.
+	 *
+	 * @param id The TagName id.
+	 *
+	 * @return The TagName object for the given id.
+	 *
+	 * @throws TskCoreException
+	 */
+	public TagName getTagName(long id) throws TskCoreException {
+		String preparedQuery = "SELECT * FROM tag_names where tag_name_id = ?";
+
+		skCase.acquireSingleUserCaseReadLock();
+		try (CaseDbConnection connection = skCase.getConnection()) {
+			try (PreparedStatement statement = connection.getPreparedStatement(preparedQuery, Statement.NO_GENERATED_KEYS)) {
+				statement.clearParameters();
+				statement.setLong(1, id);
+				try (ResultSet resultSet = statement.executeQuery()) {
+					if (resultSet.next()) {
+						return new TagName(resultSet.getLong("tag_name_id"),
+								resultSet.getString("display_name"),
+								resultSet.getString("description"),
+								TagName.HTML_COLOR.getColorByName(resultSet.getString("color")),
+								TskData.FileKnown.valueOf(resultSet.getByte("knowStatus")),
+								resultSet.getLong("tag_set_id"),
+								resultSet.getInt("rank"));
+					}
+				}
+			}
+		} catch (SQLException ex) {
+			throw new TskCoreException("", ex);
+		} finally {
+			skCase.releaseSingleUserCaseWriteLock();
+		}
+
+		return null;
+	}
+
 	/**
 	 * Determine if the given TagSet contains TagNames that are currently in
 	 * use, ie there is an existing ContentTag or ArtifactTag that uses TagName.
@@ -509,7 +680,7 @@ private List<TagName> getTagNamesByTagSetID(int tagSetId) throws TskCoreExceptio
 
 		skCase.acquireSingleUserCaseReadLock();
 		String query = String.format("SELECT * FROM tag_names WHERE tag_set_id = %d", tagSetId);
-		try (CaseDbConnection connection = skCase.getConnection();Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(query)) {
+		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(query)) {
 			while (resultSet.next()) {
 				tagNameList.add(new TagName(resultSet.getLong("tag_name_id"),
 						resultSet.getString("display_name"),
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TimelineEventType.java b/bindings/java/src/org/sleuthkit/datamodel/TimelineEventType.java
index 3b36df70a98bc1761ff2282b74c218a03b4dae5f..959c6b5222409bf613b7c1b27630653c4dd47e1f 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TimelineEventType.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TimelineEventType.java
@@ -21,10 +21,12 @@
 import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableSortedSet;
+import com.google.common.collect.ImmutableSet;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 import java.util.SortedSet;
 import static org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE.*;
 import static org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE.*;
@@ -122,6 +124,18 @@ default SortedSet<? extends TimelineEventType> getSiblings() {
 				? ImmutableSortedSet.of(ROOT_EVENT_TYPE)
 				: this.getParent().getChildren();
 	}
+	
+	/**
+	 * Returns true if the particular instance is deprecated. If deprecated, no
+	 * new timeline event types of this type will be created, but it can be
+	 * shown in the timeline.
+	 *
+	 * @return True if deprecated.
+	 */
+	default boolean isDeprecated() {
+		return false;
+	}
+	
 
 	@Override
 	default int compareTo(TimelineEventType otherType) {
@@ -191,7 +205,7 @@ public int compare(TimelineEventType o1, TimelineEventType o2) {
 				}
 			});
 
-			builder.add(FILE_SYSTEM, WEB_ACTIVITY, MISC_TYPES, CUSTOM_TYPES);
+			builder.add(FILE_SYSTEM, WEB_ACTIVITY, MISC_TYPES);
 			return builder.build();
 		}
 	};
@@ -212,8 +226,7 @@ public SortedSet< TimelineEventType> getChildren() {
 		@Override
 		public SortedSet< TimelineEventType> getChildren() {
 			return ImmutableSortedSet.of(WEB_DOWNLOADS, WEB_COOKIE,
-					WEB_COOKIE_ACCESSED,
-					WEB_COOKIE_END, WEB_BOOKMARK,
+					WEB_COOKIE_ACCESSED, WEB_COOKIE_END, WEB_BOOKMARK,
 					WEB_HISTORY, WEB_SEARCH, WEB_FORM_AUTOFILL,
 					WEB_FORM_ADDRESSES, WEB_FORM_ADDRESSES_MODIFIED,
 					WEB_FORM_AUTOFILL_ACCESSED, WEB_CACHE, WEB_HISTORY_CREATED);
@@ -236,7 +249,7 @@ public SortedSet<TimelineEventType> getChildren() {
 					PROGRAM_DELETED,
 					OS_INFO, WIFI_NETWORK, USER_DEVICE_EVENT_START, USER_DEVICE_EVENT_END,
 					SERVICE_ACCOUNT, SCREEN_SHOT, PROGRAM_NOTIFICATION,
-					BLUETOOTH_PAIRING_ACCESSED, BLUETOOTH_ADAPTER);
+					BLUETOOTH_PAIRING_ACCESSED, BLUETOOTH_ADAPTER, CUSTOM_ARTIFACT_CATCH_ALL, STANDARD_ARTIFACT_CATCH_ALL, USER_CREATED);
 
 		}
 	};
@@ -432,21 +445,19 @@ public SortedSet<TimelineEventType> getChildren() {
 			new AttributeExtractor(new Type(TSK_DEVICE_MAKE)),
 			new AttributeExtractor(new Type(TSK_DEVICE_MODEL)),
 			new AttributeExtractor(new Type(TSK_DEVICE_ID)));
-
-	//custom event type base type
-	TimelineEventType CUSTOM_TYPES = new TimelineEventTypeImpl(22,
-			getBundle().getString("BaseTypes.customTypes.name"), // NON-NLS
-			HierarchyLevel.CATEGORY, ROOT_EVENT_TYPE) {
-		@Override
-		public SortedSet< TimelineEventType> getChildren() {
-			return ImmutableSortedSet.of(OTHER, USER_CREATED);
-		}
-	};
-
-	//generic catch all other event
-	TimelineEventType OTHER = new TimelineEventArtifactTypeSingleDescription(23,
+	
+	// TimelineEventType with id 22 has been deprecated. Trying to reuse 22
+	// may cause backwards compatibility issues and is not recommended. If 22
+	// is reused create upgrade code to reassign event 22 to MISC_TYPES id = 3.
+	int DEPRECATED_OTHER_EVENT_ID = 22;
+
+	// Event for any artifact event with an artifact type for which we don't have
+	// a hard-coded event type. In other words, we recognize the artifact type
+	// as a standard artifact type, but we have not updated the Timeline code
+	// to have a corresponding inner TimelineEventType
+	TimelineEventType STANDARD_ARTIFACT_CATCH_ALL = new TimelineEventArtifactTypeSingleDescription(23,
 			getBundle().getString("CustomTypes.other.name"), //NON-NLS
-			CUSTOM_TYPES,
+			MISC_TYPES,
 			new BlackboardArtifact.Type(TSK_TL_EVENT),
 			new BlackboardAttribute.Type(TSK_DATETIME),
 			new BlackboardAttribute.Type(TSK_DESCRIPTION));
@@ -466,10 +477,12 @@ public SortedSet< TimelineEventType> getChildren() {
 			new BlackboardAttribute.Type(TSK_DATETIME),
 			new BlackboardAttribute.Type(TSK_DESCRIPTION));
 
-	//generic catch all other event
-	TimelineEventType USER_CREATED = new TimelineEventArtifactTypeSingleDescription(26,
-			getBundle().getString("CustomTypes.userCreated.name"),//NON-NLS
-			CUSTOM_TYPES,
+	// Event for any artifact event with a custom artifact type (e.g. shell bag
+	// artifact)
+	
+	TimelineEventType CUSTOM_ARTIFACT_CATCH_ALL = new TimelineEventArtifactTypeSingleDescription(26,
+			getBundle().getString("CustomTypes.customArtifact.name"),//NON-NLS
+			MISC_TYPES,
 			new BlackboardArtifact.Type(TSK_TL_EVENT),
 			new BlackboardAttribute.Type(TSK_DATETIME),
 			new BlackboardAttribute.Type(TSK_DESCRIPTION));
@@ -656,14 +669,20 @@ public SortedSet< TimelineEventType> getChildren() {
 			new BlackboardArtifact.Type(TSK_WEB_COOKIE),
 			new Type(TSK_DATETIME_ACCESSED),
 			new Type(TSK_URL));
-
+	
 	TimelineEventType WEB_COOKIE_END = new URLArtifactEventType(42,
 			getBundle().getString("WebTypes.webCookiesEnd.name"),// NON-NLS
 			WEB_ACTIVITY,
 			new BlackboardArtifact.Type(TSK_WEB_COOKIE),
 			new Type(TSK_DATETIME_END),
-			new Type(TSK_URL));
-	
+			new Type(TSK_URL)) {
+				
+		@Override
+		public boolean isDeprecated() {
+			return true;
+		}
+	};
+
 	TimelineEventType BACKUP_EVENT_START = new TimelineEventArtifactTypeImpl(43,
 			getBundle().getString("TimelineEventType.BackupEventStart.txt"),// NON-NLS
 			MISC_TYPES,
@@ -810,6 +829,15 @@ public SortedSet< TimelineEventType> getChildren() {
 			new BlackboardArtifact.Type(TSK_BLUETOOTH_PAIRING),
 			new BlackboardAttribute.Type(TSK_DATETIME_ACCESSED),
 			new BlackboardAttribute.Type(TSK_DEVICE_NAME));
+	
+	//User manually created events, created with the "Add Event" button in the 
+	// timeline UI.
+	TimelineEventType USER_CREATED = new TimelineEventArtifactTypeSingleDescription(60,
+			getBundle().getString("CustomTypes.userCreated.name"),//NON-NLS
+			MISC_TYPES,
+			new BlackboardArtifact.Type(TSK_TL_EVENT),
+			new BlackboardAttribute.Type(TSK_DATETIME),
+			new BlackboardAttribute.Type(TSK_DESCRIPTION));
 
 	static SortedSet<? extends TimelineEventType> getCategoryTypes() {
 		return ROOT_EVENT_TYPE.getChildren();
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java b/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
index e32295740ddd1ff2560f137671305b4bd9056625..3aed64ef5b9a3ed1092843d219d6b0d84e7717d2 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
@@ -150,7 +150,9 @@ private EventTypeFilter(TimelineEventType rootEventType, boolean recursive) {
 			if (recursive) {
 				// add subfilters for each subtype
 				for (TimelineEventType subType : rootEventType.getChildren()) {
-					addSubFilter(new EventTypeFilter(subType));
+					if (!subType.isDeprecated()) {
+						addSubFilter(new EventTypeFilter(subType));	
+					}
 				}
 			}
 		}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TimelineManager.java b/bindings/java/src/org/sleuthkit/datamodel/TimelineManager.java
index 1e165dfacf37f4d3f02589f8c126d71310750816..0b215868d60e5d045037766c16c7dee15ea31f7f 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TimelineManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TimelineManager.java
@@ -26,6 +26,7 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
+import java.text.MessageFormat;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -46,9 +47,10 @@
 import static org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE.TSK_TL_EVENT;
 import static org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE.TSK_TL_EVENT_TYPE;
 import static org.sleuthkit.datamodel.CollectionUtils.isNotEmpty;
+import static org.sleuthkit.datamodel.CommManagerSqlStringUtils.buildCSVString;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
+import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
 import static org.sleuthkit.datamodel.SleuthkitCase.escapeSingleQuotes;
-import static org.sleuthkit.datamodel.StringUtils.buildCSVString;
 
 /**
  * Provides access to the timeline data in a case database.
@@ -79,10 +81,8 @@ public final class TimelineManager {
 	 */
 	private static final ImmutableList<TimelineEventType> PREDEFINED_EVENT_TYPES
 			= new ImmutableList.Builder<TimelineEventType>()
-					.add(TimelineEventType.CUSTOM_TYPES)
 					.addAll(TimelineEventType.WEB_ACTIVITY.getChildren())
 					.addAll(TimelineEventType.MISC_TYPES.getChildren())
-					.addAll(TimelineEventType.CUSTOM_TYPES.getChildren())
 					.build();
 
 	// all known artifact type ids (used for determining if an artifact is standard or custom event)
@@ -352,6 +352,12 @@ public Long getMinEventTime() throws TskCoreException {
 	 *         the event type is not found.
 	 */
 	public Optional<TimelineEventType> getEventType(long eventTypeID) {
+		// The parent EventType with ID 22 has been deprecated. This ID had two
+		// children which have been reassigned to MISC_TYPES.
+		if(eventTypeID == TimelineEventType.DEPRECATED_OTHER_EVENT_ID) {
+			return Optional.of(TimelineEventType.MISC_TYPES);
+		}
+
 		return Optional.ofNullable(eventTypeIDMap.get(eventTypeID));
 	}
 
@@ -582,6 +588,7 @@ Collection<TimelineEvent> addEventsForNewFile(AbstractFile file, CaseDbConnectio
 	 */
 	Set<TimelineEvent> addEventsForNewFileQuiet(AbstractFile file, CaseDbConnection connection) throws TskCoreException {
 		//gather time stamps into map
+		// if any of these events become deprecated in the future, filtering may need to occur.
 		Map<TimelineEventType, Long> timeMap = ImmutableMap.of(TimelineEventType.FILE_CREATED, file.getCrtime(),
 				TimelineEventType.FILE_ACCESSED, file.getAtime(),
 				TimelineEventType.FILE_CHANGED, file.getCtime(),
@@ -663,15 +670,15 @@ Set<TimelineEvent> addArtifactEvents(BlackboardArtifact artifact) throws TskCore
 			TimelineEventType eventType;//the type of the event to add.
 			BlackboardAttribute attribute = artifact.getAttribute(new BlackboardAttribute.Type(TSK_TL_EVENT_TYPE));
 			if (attribute == null) {
-				eventType = TimelineEventType.OTHER;
+				eventType = TimelineEventType.STANDARD_ARTIFACT_CATCH_ALL;
 			} else {
 				long eventTypeID = attribute.getValueLong();
-				eventType = eventTypeIDMap.getOrDefault(eventTypeID, TimelineEventType.OTHER);
+				eventType = eventTypeIDMap.getOrDefault(eventTypeID, TimelineEventType.STANDARD_ARTIFACT_CATCH_ALL);
 			}
 
 			try {
 				// @@@ This casting is risky if we change class hierarchy, but was expedient.  Should move parsing to another class
-				addArtifactEvent(((TimelineEventArtifactTypeImpl) TimelineEventType.OTHER).makeEventDescription(artifact), eventType, artifact)
+				addArtifactEvent(((TimelineEventArtifactTypeImpl) TimelineEventType.STANDARD_ARTIFACT_CATCH_ALL).makeEventDescription(artifact), eventType, artifact)
 						.ifPresent(newEvents::add);
 			} catch (DuplicateException ex) {
 				logger.log(Level.SEVERE, getDuplicateExceptionMessage(artifact, "Attempt to make a timeline event artifact duplicate"), ex);
@@ -780,11 +787,63 @@ private Optional<TimelineEvent> addOtherEventDesc(BlackboardArtifact artifact) t
 		TimelineEventDescriptionWithTime evtWDesc = new TimelineEventDescriptionWithTime(timeVal, description, description, description);
 
 		TimelineEventType evtType = (ARTIFACT_TYPE_IDS.contains(artifact.getArtifactTypeID()))
-				? TimelineEventType.OTHER
-				: TimelineEventType.USER_CREATED;
+				? TimelineEventType.STANDARD_ARTIFACT_CATCH_ALL
+				: TimelineEventType.CUSTOM_ARTIFACT_CATCH_ALL;
 
 		return addArtifactEvent(evtWDesc, evtType, artifact);
 	}
+	
+	
+	/**
+	 * Adds a timeline event to the database in a transaction.
+	 * @param eventType The event type.
+	 * @param shortDesc The short description.
+	 * @param medDesc The medium description.
+	 * @param longDesc The long description.
+	 * @param dataSourceId The data source id of the event.
+	 * @param contentId The content id of the event.
+	 * @param artifactId The artifact id of the event (can be null).
+	 * @param time Unix epoch offset time of the event in seconds.
+	 * @param hashHit True if a hash hit.
+	 * @param tagged True if tagged.
+	 * @param trans The transaction.
+	 * @return The added event.
+	 * @throws TskCoreException 
+	 */
+	@Beta
+	public TimelineEvent addTimelineEvent(
+			TimelineEventType eventType, String shortDesc, String medDesc, String longDesc,
+			long dataSourceId, long contentId, Long artifactId, long time,
+			boolean hashHit, boolean tagged,
+			CaseDbTransaction trans
+	) throws TskCoreException {
+		caseDB.acquireSingleUserCaseWriteLock();
+		try {
+			Long descriptionID = addEventDescription(dataSourceId, contentId, artifactId,
+					longDesc, medDesc, shortDesc, hashHit, tagged, trans.getConnection());
+
+			if (descriptionID == null) {
+				descriptionID = getEventDescription(dataSourceId, contentId, artifactId, longDesc, trans.getConnection());
+			}
+			if (descriptionID != null) {
+				long eventID = addEventWithExistingDescription(time, eventType, descriptionID, trans.getConnection());
+				TimelineEvent timelineEvt = new TimelineEvent(eventID, descriptionID, contentId, artifactId, time, eventType,
+						longDesc, medDesc, shortDesc, hashHit, tagged);
+				
+				trans.registerTimelineEvent(new TimelineEventAddedEvent(timelineEvt));
+				return timelineEvt;
+			} else {
+				throw new TskCoreException(MessageFormat.format(
+						"Failed to get event description for [shortDesc: {0}, dataSourceId: {1}, contentId: {2}, artifactId: {3}]",
+						shortDesc, dataSourceId, contentId, artifactId == null ? "<null>" : artifactId));
+			}
+		} catch (DuplicateException dupEx) {
+			logger.log(Level.WARNING, "Attempt to make duplicate", dupEx);
+			return null;
+		} finally {
+			caseDB.releaseSingleUserCaseWriteLock();
+		}
+	}
 
 	/**
 	 * Add an event of the given type from the given artifact to the database.
@@ -802,7 +861,9 @@ private Optional<TimelineEvent> addOtherEventDesc(BlackboardArtifact artifact) t
 	private Optional<TimelineEvent> addArtifactEvent(TimelineEventDescriptionWithTime eventPayload,
 			TimelineEventType eventType, BlackboardArtifact artifact) throws TskCoreException, DuplicateException {
 
-		if (eventPayload == null) {
+		// make sure event payload is present
+		// only create event for a timeline event type if not deprecated
+		if (eventPayload == null || eventType.isDeprecated()) {
 			return Optional.empty();
 		}
 		long time = eventPayload.getTime();
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TskCaseDbBridge.java b/bindings/java/src/org/sleuthkit/datamodel/TskCaseDbBridge.java
index b164210aabd23194fbdeed13d0d54c966270fc9f..0da8c2a846cca449749b58250e49aa0482d6315d 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TskCaseDbBridge.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TskCaseDbBridge.java
@@ -66,6 +66,7 @@ class TskCaseDbBridge {
     private final Queue<FileInfo> batchedFiles = new LinkedList<>();
     private final Queue<LayoutRangeInfo> batchedLayoutRanges = new LinkedList<>();
     private final List<Long> layoutFileIds = new ArrayList<>();
+    private final Map<Long, VirtualDirectory> unallocFileDirs = new HashMap<>();
     
     TskCaseDbBridge(SleuthkitCase caseDb, AddDataSourceCallbacks addDataSourceCallbacks, Host host) {
         this.caseDb = caseDb;
@@ -386,12 +387,19 @@ private long addBatchedFilesToDb() {
 							// Currently we expect only NTFS systems to provide a windows style SID as owner id.
 							OsAccountManager accountMgr = caseDb.getOsAccountManager();
 							OsAccount newAccount = accountMgr.newWindowsOsAccount(ownerUid, null, null, imageHost, OsAccountRealm.RealmScope.UNKNOWN);
-							accountMgr.newOsAccountInstance(newAccount.getId(), fileInfo.dataSourceObjId, OsAccountInstance.OsAccountInstanceType.LAUNCHED, caseDb.getConnection());
+							Content ds = caseDb.getContentById(fileInfo.dataSourceObjId); // Data sources are cached so this will only access the database once
+							if (ds instanceof DataSource) {
+								accountMgr.newOsAccountInstance(newAccount, (DataSource)ds, OsAccountInstance.OsAccountInstanceType.ACCESSED);
+							}
 							ownerIdToAccountMap.put(ownerUid, newAccount);
 						}
 					} catch (NotUserSIDException ex) {
 						// if the owner SID is not a user SID, set the owner account to null
 						ownerIdToAccountMap.put(ownerUid, null);
+					} catch (Exception ex) {
+						// catch other exceptions to avoid skipping add batched files loop below
+						logger.log(Level.WARNING, "Error mapping ownerId " + ownerUid + " to account", ex);
+						ownerIdToAccountMap.put(ownerUid, null);
 					}
 				}
 			}
@@ -422,10 +430,11 @@ private long addBatchedFilesToDb() {
 					}
 					
 					// We've seen a case where the root folder comes in with an undefined meta type.
-					// In that case, we alter the type to TSK_FS_META_TYPE_DIR so it will be cached
-					// properly and will not cause errors later for being an unexpected type.
+					// We've also seen a case where it comes in as a regular file. The root folder should always be
+					// a directory so it will be cached properly and will not cause errors later for
+					// being an unexpected type.
 					if ((fileInfo.parentObjId == fileInfo.fsObjId)
-							&& (fileInfo.metaType == TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.getValue())) {
+							&& (fileInfo.metaType != TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_DIR.getValue())) {
 						fileInfo.metaType = TskData.TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_DIR.getValue();
 					}
 					
@@ -479,7 +488,7 @@ private long addBatchedFilesToDb() {
                 // Exception firewall to prevent unexpected return to the native code
                 logger.log(Level.SEVERE, "Unexpected error from files added callback", ex);
             }
-        } catch (TskCoreException ex) {
+        } catch (Throwable ex) {
             logger.log(Level.SEVERE, "Error adding batched files to database", ex);
             revertTransaction();
             return -1;
@@ -539,6 +548,12 @@ long addLayoutFile(long parentObjId,
             if (fsObjId == 0) {
                 fsObjIdForDb = null;
             }
+			
+            // If the layout file is in an $Unalloc folder, add the name. Otherwise use "/".
+            String parentPath = "/";
+            if (unallocFileDirs.containsKey(parentObjId)) {
+                parentPath = "/" + unallocFileDirs.get(parentObjId).getName() + "/";
+            }
             
             beginTransaction();
             long objId = addFileToDb(parentObjId, 
@@ -554,7 +569,7 @@ long addLayoutFile(long parentObjId,
                 null, null, null, null,
                 null, null, null,
                 null, TskData.FileKnown.UNKNOWN,
-                null, null, null, OsAccount.NO_ACCOUNT,
+                parentPath, null, null, OsAccount.NO_ACCOUNT,
                 true, trans);
             commitTransaction();
 
@@ -644,6 +659,7 @@ long addUnallocFsBlockFilesParent(long fsObjId, String name) {
             beginTransaction();
             VirtualDirectory dir = caseDb.addVirtualDirectory(fsIdToRootDir.get(fsObjId), name, trans);
             commitTransaction();
+            unallocFileDirs.put(dir.getId(), dir);
             addDataSourceCallbacks.onFilesAdded(Arrays.asList(dir.getId()));
             return dir.getId();
         } catch (TskCoreException ex) {
@@ -867,8 +883,8 @@ private long addFileToDb(long parentObjId,
 			// INSERT INTO tsk_objects (par_obj_id, type) VALUES (?, ?)
 			long objectId = caseDb.addObject(parentObjId, TskData.ObjectType.ABSTRACTFILE.getObjectType(), connection);
 				
-			String fileInsert = "INSERT INTO tsk_files (fs_obj_id, obj_id, data_source_obj_id, type, attr_type, attr_id, name, meta_addr, meta_seq, dir_type, meta_type, dir_flags, meta_flags, size, crtime, ctime, atime, mtime, mode, gid, uid, md5, known, parent_path, extension, has_layout, owner_uid, os_account_obj_id)"
-				+ " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; // NON-NLS
+			String fileInsert = "INSERT INTO tsk_files (fs_obj_id, obj_id, data_source_obj_id, type, attr_type, attr_id, name, meta_addr, meta_seq, dir_type, meta_type, dir_flags, meta_flags, size, crtime, ctime, atime, mtime, mode, gid, uid, md5, known, parent_path, extension, has_layout, owner_uid, os_account_obj_id, collected)"
+				+ " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; // NON-NLS
 			PreparedStatement preparedStatement = connection.getPreparedStatement(fileInsert, Statement.NO_GENERATED_KEYS);			
 			preparedStatement.clearParameters();
 			
@@ -959,6 +975,8 @@ private long addFileToDb(long parentObjId,
 				preparedStatement.setNull(28, java.sql.Types.BIGINT);
 			}
 			
+			preparedStatement.setLong(29, TskData.CollectedStatus.UNKNOWN.getType());
+			
 			connection.executeUpdate(preparedStatement);
 
 			// If this is not a slack file create the timeline events
@@ -966,12 +984,12 @@ private long addFileToDb(long parentObjId,
 					&& TskData.TSK_DB_FILES_TYPE_ENUM.SLACK.getFileType() != fsType
 					&& (!name.equals(".")) && (!name.equals(".."))) {
 				TimelineManager timelineManager = caseDb.getTimelineManager();
-				DerivedFile derivedFile = new DerivedFile(caseDb, objectId, dataSourceObjId, name,
+				DerivedFile derivedFile = new DerivedFile(caseDb, objectId, dataSourceObjId, fsObjId, name,
 						TskData.TSK_FS_NAME_TYPE_ENUM.valueOf((short) dirType),
 						TskData.TSK_FS_META_TYPE_ENUM.valueOf((short) metaType),
 						TskData.TSK_FS_NAME_FLAG_ENUM.valueOf(dirFlags),
 						(short) metaFlags,
-						size, ctime, crtime, atime, mtime, null, null, null, escaped_path, null, parentObjId, null, null, extension, ownerUid, ownerAcctObjId);
+						size, ctime, crtime, atime, mtime, null, null, null, null, escaped_path, null, parentObjId, null, null, extension, ownerUid, ownerAcctObjId);
 
 				timelineManager.addEventsForNewFileQuiet(derivedFile, connection);
 			}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TskData.java b/bindings/java/src/org/sleuthkit/datamodel/TskData.java
index 805770328239cff06fe6536eecb206862ede928b..0972d09e4b663a62d851bf77f80da79e978f744b 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TskData.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TskData.java
@@ -29,7 +29,7 @@
  */
 public class TskData {
 
-	private static ResourceBundle bundle = ResourceBundle.getBundle("org.sleuthkit.datamodel.Bundle");
+	private final static ResourceBundle bundle = ResourceBundle.getBundle("org.sleuthkit.datamodel.Bundle");
 
 	/**
 	 * The type of the file system file, as reported in the name structure of
@@ -153,6 +153,7 @@ public static TSK_FS_META_TYPE_ENUM valueOf(short metaType) {
 	 */
 	public enum TSK_FS_NAME_FLAG_ENUM {
 
+		UNKNOWN(0, bundle.getString("TskData.tskFsNameFlagEnum.unknown")), ///< Unknown
 		ALLOC(1, bundle.getString("TskData.tskFsNameFlagEnum.allocated")), ///< Name is in an allocated state
 		UNALLOC(2, bundle.getString("TskData.tskFsNameFlagEnum.unallocated"));    ///< Name is in an unallocated state
 
@@ -191,8 +192,8 @@ public static TSK_FS_NAME_FLAG_ENUM valueOf(int dirFlag) {
 					return flag;
 				}
 			}
-			throw new IllegalArgumentException(
-					MessageFormat.format(bundle.getString("TskData.tskFsNameFlagEnum.exception.msg1.text"), dirFlag));
+
+			return TSK_FS_NAME_FLAG_ENUM.UNKNOWN;
 		}
 	}
 
@@ -203,6 +204,7 @@ public static TSK_FS_NAME_FLAG_ENUM valueOf(int dirFlag) {
 	 */
 	public enum TSK_FS_META_FLAG_ENUM {
 
+		UNKNOWN(0, bundle.getString("TskData.tskFsMetaFlagEnum.unknown")), ///< Unknown
 		ALLOC(1, bundle.getString("TskData.tskFsMetaFlagEnum.allocated")), ///< Metadata structure is currently in an allocated state
 		UNALLOC(2, bundle.getString("TskData.tskFsMetaFlagEnum.unallocated")), ///< Metadata structure is currently in an unallocated state
 		USED(4, bundle.getString("TskData.tskFsMetaFlagEnum.used")), ///< Metadata structure has been allocated at least once
@@ -247,6 +249,11 @@ public String toString() {
 		public static Set<TSK_FS_META_FLAG_ENUM> valuesOf(short metaFlags) {
 			Set<TSK_FS_META_FLAG_ENUM> matchedFlags = EnumSet.noneOf(TSK_FS_META_FLAG_ENUM.class);
 
+			if (metaFlags == TSK_FS_META_FLAG_ENUM.UNKNOWN.getValue()) {
+				matchedFlags.add(TSK_FS_META_FLAG_ENUM.UNKNOWN);
+				return matchedFlags;
+			}
+			
 			for (TSK_FS_META_FLAG_ENUM v : TSK_FS_META_FLAG_ENUM.values()) {
 				long flag = v.getValue();
 
@@ -254,7 +261,7 @@ public static Set<TSK_FS_META_FLAG_ENUM> valuesOf(short metaFlags) {
 					matchedFlags.add(v);
 				}
 			}
-
+			
 			return matchedFlags;
 		}
 
@@ -462,6 +469,7 @@ public enum TSK_FS_TYPE_ENUM {
 		TSK_FS_TYPE_YAFFS2_DETECT(0x00004000, bundle.getString("TskData.tskFsTypeEnum.YAFFS2autoDetect")), ///< YAFFS2 auto detection
 		TSK_FS_TYPE_APFS(0x00010000, "APFS"), ///< APFS file system
 		TSK_FS_TYPE_APFS_DETECT(0x00010000, bundle.getString("TskData.tskFsTypeEnum.APFSautoDetect")), ///< APFS auto detection
+		TSK_FS_TYPE_LOGICAL(0x00020000, "Logical"),
 		TSK_FS_TYPE_UNSUPP(0xffffffff, bundle.getString("TskData.tskFsTypeEnum.unsupported"));        ///< Unsupported file system
 
 		private int value;
@@ -527,6 +535,7 @@ public enum TSK_IMG_TYPE_ENUM {
 		TSK_IMG_TYPE_VMDK_VMDK(128, "VMDK"), // VMware Virtual Disk (VMDK) NON-NLS
 		TSK_IMG_TYPE_VHD_VHD(256, "VHD"), // Virtual Hard Disk (VHD) image format NON-NLS
 		TSK_IMG_TYPE_POOL_POOL(16384, "POOL"), // Pool (internal use) NON-NLS
+		TSK_IMG_TYPE_LOGICAL(32768, "Logical"),   /// Logical directory
 		TSK_IMG_TYPE_UNSUPP(65535, bundle.getString("TskData.tskImgTypeEnum.unknown"));   // Unsupported Image Type
 
 		private long imgType;
@@ -625,22 +634,24 @@ public String getName() {
 	 */
 	public enum ObjectType {
 
-		IMG(0), ///< Disk Image - see tsk_image_info for more details
-		VS(1), ///< Volume System - see tsk_vs_info for more details
-		VOL(2), ///< Volume - see tsk_vs_parts for more details
-		FS(3), ///< File System - see tsk_fs_info for more details
-		ABSTRACTFILE(4), ///< File - see tsk_files for more details
-		ARTIFACT(5),	/// Artifact - see blackboard_artifacts for more details
-		REPORT(6),	///< Report - see reports for more details
-		POOL(7),	///< Pool
-		OS_ACCOUNT(8), ///< OS Account - see tsk_os_accounts for more details
-		HOST_ADDRESS(9), ///< Host Address - see tsk_host_addresses for more details
-		UNSUPPORTED(-1) ///< Unsupported type
+		IMG(0, bundle.getString("TskData.ObjectType.IMG.name")), ///< Disk Image - see tsk_image_info for more details
+		VS(1, bundle.getString("TskData.ObjectType.VS.name")), ///< Volume System - see tsk_vs_info for more details
+		VOL(2, bundle.getString("TskData.ObjectType.VOL.name")), ///< Volume - see tsk_vs_parts for more details
+		FS(3, bundle.getString("TskData.ObjectType.FS.name")), ///< File System - see tsk_fs_info for more details
+		ABSTRACTFILE(4, bundle.getString("TskData.ObjectType.AbstractFile.name")), ///< File - see tsk_files for more details
+		ARTIFACT(5, bundle.getString("TskData.ObjectType.Artifact.name")),	/// Artifact - see blackboard_artifacts for more details
+		REPORT(6, bundle.getString("TskData.ObjectType.Report.name")),	///< Report - see reports for more details
+		POOL(7, bundle.getString("TskData.ObjectType.Pool.name")),	///< Pool
+		OS_ACCOUNT(8, bundle.getString("TskData.ObjectType.OsAccount.name")), ///< OS Account - see tsk_os_accounts for more details
+		HOST_ADDRESS(9, bundle.getString("TskData.ObjectType.HostAddress.name")), ///< Host Address - see tsk_host_addresses for more details
+		UNSUPPORTED(-1, bundle.getString("TskData.ObjectType.Unsupported.name")) ///< Unsupported type
 		; 
 		private final short objectType;
+		private final String displayName;
 
-		private ObjectType(int objectType) {
+		private ObjectType(int objectType, String displayName) {
 			this.objectType = (short) objectType;
+			this.displayName = displayName;
 		}
 
 		/**
@@ -651,6 +662,11 @@ private ObjectType(int objectType) {
 		public short getObjectType() {
 			return objectType;
 		}
+		
+		@Override
+		public String toString() {
+			return displayName;
+		}
 
 		/**
 		 * Convert object type short value to the enum type
@@ -880,4 +896,77 @@ public static EncodingType valueOf(int type) {
 					MessageFormat.format(bundle.getString("TskData.encodingType.exception.msg1.text"), type));
 		}
 	}
+	
+	/**
+	 * CollectedStatus stores where the data for a file can be found or the
+     * reason no data for the file exists.
+	 */
+	public enum CollectedStatus{
+
+		UNKNOWN(0),
+		NO_SAVE_ERROR(1),
+		NO_EMPTY_FILE(2),
+		NO_NOT_FOUND(3),
+		NO_UNRESOLVED(4),
+		NO_READ_ERROR(5),
+		NO_READ_ERROR_PARTIAL(6),
+		NO_NOT_ATTEMPTED(7),
+		NO_NOT_REGULAR_FILE(8),
+		NO_FILE_TOO_LARGE(9),
+		NO_ONLY_HASH_COLLECTED(10),
+		NO_UNSUPPORTED_COMPRESSION(11),
+		YES_TSK(12),
+		YES_REPO(13);
+
+		private final int type;
+		
+		private CollectedStatus(int type){
+			this.type = type;
+		}
+		
+		public int getType(){
+			return type;
+		}
+		
+		public static CollectedStatus valueOf(int type) {
+			for (CollectedStatus v : CollectedStatus.values()) {
+				if (v.type == type) {
+					return v;
+				}
+			}
+			throw new IllegalArgumentException(
+					MessageFormat.format(bundle.getString("TskData.collectedStatus.exception.msg1.text"), type));
+		}
+	}
+	
+    /** 
+	 * Type of keyword search query 
+	 **/
+    public enum KeywordSearchQueryType {
+        LITERAL(0), 
+		SUBSTRING(1), 
+		REGEX(2);
+		
+		private final int type;
+		
+		private KeywordSearchQueryType(int type){
+			this.type = type;
+		}
+		
+		public int getType(){
+			return type;
+		}
+		
+		public static KeywordSearchQueryType valueOf(int type) {
+			for (KeywordSearchQueryType v : KeywordSearchQueryType.values()) {
+				if (v.type == type) {
+					return v;
+				}
+			}
+			throw new IllegalArgumentException(
+					MessageFormat.format(bundle.getString("TskData.keywordSearchQueryType.exception.msg1.text"), type));
+		}		
+    };	
+	
+	
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TskEvent.java b/bindings/java/src/org/sleuthkit/datamodel/TskEvent.java
index d67686c466ace311eec1d2da324fbdd517d4929f..723a7bfff7b4c8ddd686077fa5a25e7ecb35506a 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/TskEvent.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TskEvent.java
@@ -306,6 +306,63 @@ public List<Long> getOsAccountObjectIds() {
 
 	}
 
+	/**
+	 * An event published when one or more OS accounts are merged.
+	 */
+	public final static class OsAccountsMergedTskEvent extends TskObjectsEvent<MergedAccountsPair> {
+
+		/**
+		 * Constructs an event published when one or more OS accounts are
+		 * merged.
+		 *
+		 * @param mergedAccounts List of the merged OS accounts.
+		 */
+		OsAccountsMergedTskEvent(List<MergedAccountsPair> mergedAccounts) {
+			super(mergedAccounts);
+		}
+
+
+		/**
+		 * Gets the pairs of merged accounts
+		 * 
+		 * @return 
+		 */
+		public List<MergedAccountsPair> getMergedAccountPairs() {
+			return getDataModelObjects();
+		}
+
+	}
+
+	/**
+	 * Container to encapsulate the merged account ids, contains both the source and destination account ids.
+	 */
+	public final static class MergedAccountsPair {
+
+		private final Long sourceOsAccountId;
+		private final Long destinationOsAccountId;
+
+		public MergedAccountsPair(Long sourceOsAccountId, Long destinationOsAccountId) {
+			this.sourceOsAccountId = sourceOsAccountId;
+			this.destinationOsAccountId = destinationOsAccountId;
+		}
+
+		/**
+		 * Gets the source os account id. This is the account that was marked as "MERGED"
+		 * @return The TSK object ID of the source os account
+		 */
+		public Long getSourceOsAccountId() {
+			return sourceOsAccountId;
+		}
+
+		/**
+		 * Gets the destination os account id. This is the account that the source was merged into.
+		 * @return The TSK object ID of the destination os account
+		 */
+		public Long getDestinationOsAccountId() {
+			return destinationOsAccountId;
+		}
+	}
+	
 	/**
 	 * An event published when one or more OS account instances are added.
 	 */
@@ -462,7 +519,7 @@ public final static class HostsRemovedFromPersonTskEvent extends TskObjectsEvent
 		private final Person person;
 
 		/**
-		 * Contructs an event published when one or more hosts are removed from
+		 * Constructs an event published when one or more hosts are removed from
 		 * a person.
 		 *
 		 * @param person  The person.
@@ -493,4 +550,123 @@ public List<Long> getHostIds() {
 
 	}
 
+	static abstract class TagNamesTskEvent extends TskObjectsEvent<TagName> {
+
+		public TagNamesTskEvent(List<TagName> tagNames) {
+			super(tagNames);
+		}
+
+		/**
+		 * Returns the list of added or updated TagName objects.
+		 *
+		 * @return The TagName list.
+		 */
+		public List<TagName> getTagNames() {
+			return getDataModelObjects();
+		}
+
+	}
+
+	/**
+	 * An event published when one or more TagName are added.
+	 */
+	public final static class TagNamesAddedTskEvent extends TagNamesTskEvent {
+
+		/**
+		 * Construct an event when one or more TagName are created or updated.
+		 *
+		 * @param tagNames List of added or modified TagName.
+		 */
+		public TagNamesAddedTskEvent(List<TagName> tagNames) {
+			super(tagNames);
+		}
+	}
+
+	/**
+	 * An event published when one or more TagName are updated.
+	 */
+	public final static class TagNamesUpdatedTskEvent extends TagNamesTskEvent {
+
+		/**
+		 * Construct an event when one or more TagName are updated.
+		 *
+		 * @param tagNames List of added or modified TagName.
+		 */
+		public TagNamesUpdatedTskEvent(List<TagName> tagNames) {
+			super(tagNames);
+		}
+	}
+
+	/**
+	 * An event published when one or more TagName are deleted.
+	 */
+	public final static class TagNamesDeletedTskEvent extends TskObjectsEvent<Long> {
+
+		/**
+		 * Constructs a new event with the given list of TagName ids.
+		 *
+		 * @param tagNameIds Deleted TagName id list.
+		 */
+		public TagNamesDeletedTskEvent(List<Long> tagNameIds) {
+			super(tagNameIds);
+		}
+
+		/**
+		 * List of the deleted TagName ids.
+		 *
+		 * @return The list of deleted TagName Ids.
+		 */
+		public List<Long> getTagNameIds() {
+			return getDataModelObjects();
+		}
+
+	}
+
+	/**
+	 * An event published when one or more TagSets have been added.
+	 */
+	public final static class TagSetsAddedTskEvent extends TskObjectsEvent<TagSet> {
+
+		/**
+		 * Constructs an added event for one or more TagSets.
+		 *
+		 * @param tagSets The added TagSet.
+		 */
+		public TagSetsAddedTskEvent(List<TagSet> tagSets) {
+			super(tagSets);
+		}
+
+		/**
+		 * Return the TagSets list.
+		 *
+		 * @return The TagSet list.
+		 */
+		public List<TagSet> getTagSets() {
+			return getDataModelObjects();
+		}
+	}
+
+	/**
+	 * An event published when one or more TagSets have been deleted.
+	 */
+	public final static class TagSetsDeletedTskEvent extends TskObjectsEvent<Long> {
+
+		/**
+		 * Constructs a deleted event for one or more TagSets.
+		 *
+		 * @param tagSetIds The ids of the deleted TagSets.
+		 */
+		public TagSetsDeletedTskEvent(List<Long> tagSetIds) {
+			super(tagSetIds);
+		}
+
+		/**
+		 * Returns the list of deleted TagSet ids.
+		 *
+		 * @return The list of deleted TagSet ids.
+		 */
+		public List<Long> getTagSetIds() {
+			return getDataModelObjects();
+		}
+	}
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/VirtualDirectory.java b/bindings/java/src/org/sleuthkit/datamodel/VirtualDirectory.java
index 729707128e93f097312caca5a16222c30927943b..df34ec23bca9df59956859758828afc36c3b9b56 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/VirtualDirectory.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/VirtualDirectory.java
@@ -1,7 +1,7 @@
 /*
  * SleuthKit Java Bindings
  *
- * Copyright 2011-2017 Basis Technology Corp.
+ * Copyright 2011-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -54,6 +54,7 @@ public class VirtualDirectory extends SpecialDirectory {
 	 * @param dataSourceObjectId The object id of the data source for the
 	 *                           virtual directory; same as objId if the virtual
 	 *                           directory is a data source.
+	 * @param fileSystemObjectId The object id of the file system. May be null.
 	 * @param name               The name of the virtual directory.
 	 * @param dirType            The TSK_FS_NAME_TYPE_ENUM for the virtual
 	 *                           directory.
@@ -63,6 +64,8 @@ public class VirtualDirectory extends SpecialDirectory {
 	 *                           directory.
 	 * @param metaFlags          The meta flags for the virtual directory.
 	 * @param md5Hash            The MD5 hash for the virtual directory.
+	 * @param sha1Hash           SHA-1 hash for the virtual directory, may be
+	 *                           null.
 	 * @param knownState         The known state for the virtual directory
 	 * @param parentPath         The parent path for the virtual directory,
 	 *                           should be "/" if the virtual directory is a
@@ -71,14 +74,16 @@ public class VirtualDirectory extends SpecialDirectory {
 	VirtualDirectory(SleuthkitCase db,
 			long objId,
 			long dataSourceObjectId,
+			Long fileSystemObjectId,
 			String name,
 			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
 			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
-			String md5Hash, String sha256Hash, FileKnown knownState,
-			String parentPath) {
-		super(db, objId, dataSourceObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name,
+			String md5Hash, String sha256Hash, String sha1Hash, 
+			FileKnown knownState,
+			String parentPath ) {
+		super(db, objId, dataSourceObjectId, fileSystemObjectId, TSK_FS_ATTR_TYPE_ENUM.TSK_FS_ATTR_TYPE_DEFAULT, 0, name,
 				TskData.TSK_DB_FILES_TYPE_ENUM.VIRTUAL_DIR, 0L, 0, dirType, metaType, dirFlag,
-				metaFlags, 0L, 0L, 0L, 0L, 0L, (short) 0, 0, 0, md5Hash, sha256Hash, knownState, parentPath, null);
+				metaFlags, 0L, 0L, 0L, 0L, 0L, (short) 0, 0, 0, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, null);
 	}
 
 	/**
diff --git a/bindings/java/src/org/sleuthkit/datamodel/Volume.java b/bindings/java/src/org/sleuthkit/datamodel/Volume.java
index 8350105e5abd072ade826f7bc11e78e0db691bda..48df129195736a44c39f573bf76cf93cadcaf796 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/Volume.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/Volume.java
@@ -92,6 +92,7 @@ public void close() {
         volumeHandle = 0;
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	public void finalize() throws Throwable {
 		try {
@@ -276,7 +277,7 @@ public List<Long> getChildrenIds() throws TskCoreException {
 	}
 
 	/**
-	 * @return a list of FileSystem that are direct descendents of this Image.
+	 * @return a list of FileSystem that are direct descendants of this Image.
 	 *
 	 * @throws TskCoreException
 	 */
diff --git a/bindings/java/src/org/sleuthkit/datamodel/VolumeSystem.java b/bindings/java/src/org/sleuthkit/datamodel/VolumeSystem.java
index e403576142eb9d389bcf627e1395b82cdf9b396f..acb315595c333cce4947c39bfcf56e6e05e28913 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/VolumeSystem.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/VolumeSystem.java
@@ -123,6 +123,7 @@ public void close() {
 		}
 	}
 
+	@SuppressWarnings("deprecation")
 	@Override
 	public void finalize() throws Throwable {
 		try {
diff --git a/bindings/java/src/org/sleuthkit/datamodel/WindowsAccountUtils.java b/bindings/java/src/org/sleuthkit/datamodel/WindowsAccountUtils.java
index 0f1a9d37e8824b86426c4afb6377a0ba695e010b..28a92ca920e1754e4cf89281ac607c6a79d30e55 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/WindowsAccountUtils.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/WindowsAccountUtils.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2021 Basis Technology Corp.
+ * Copyright 2021-2022 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,26 +18,41 @@
  */
 package org.sleuthkit.datamodel;
 
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Optional;
 import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.apache.commons.lang3.StringUtils;
+import com.google.common.collect.HashBasedTable;
+import com.google.common.collect.Table;
+import java.util.Locale;
 
 /**
  * A utility class for handling Windows specific accounts and SIDs.
+ *
+ * Implementation notes:
+ * - SIDs for standard "Service Accounts" are added to a host-scoped special realm. 
+ * - SIDs for standard groups are not added as OS Accounts
  * 
  */
 final class WindowsAccountUtils {
 	
-	// Special Windows Accounts with short SIDS are given a special realm "address".
-	final static String SPECIAL_WINDOWS_REALM_ADDR = "SPECIAL_WINDOWS_ACCOUNTS";
-	
+
 	final static String SPECIAL_WINDOWS_BACK_UP_POSTFIX = ".bak";
 	
+	// Windows sometimes uses a special NULL SID, when a user's actual SID is unknown.
+	// Our SID comparisons should ignore it, and treat it as a null/blank. 
+	final static String WINDOWS_NULL_SID = "S-1-0-0";
 	
 	// Windows uses SIDs for groups as well as users. 
 	// We don't want to create "User" account for group SIDs.
 	// The lists here help us identify and weed out group SIDs when creating accounts.
 	private static final Set<String> GROUP_SIDS = ImmutableSet.of(
-			"S-1-0-0",	// Null SID
 			"S-1-1-0",	// Everyone
 			"S-1-2-0",	// Local - anyone who has logged on locally
 			"S-1-2-1",	// Console Logon
@@ -68,12 +83,16 @@ final class WindowsAccountUtils {
 	
 	// Any SIDs with the following prefixes are group SID and should be excluded.
 	private static final Set<String> GROUP_SID_PREFIX = ImmutableSet.of(
-			"S-1-5-32"		// Builtin
+			"S-1-5-32",		// Builtin
+			"S-1-5-87"		// Task ID prefix
 			
 	);
 	
 	// SIDs that begin with a domain SID prefix and have one of these 
-	private static final String DOMAIN_SID_PREFIX = "S-1-5";	
+	private static final String NTAUTHORITY_SID_PREFIX = "S-1-5";	
+	private static final String NTAUTHORITY_REALM_NAME = "NT AUTHORITY";
+	
+	
 	private static final Set<String> DOMAIN_GROUP_SID_SUFFIX = ImmutableSet.of(
 			"-512",		// Domain Admins
 			"-513",		// Domain Users
@@ -103,45 +122,318 @@ final class WindowsAccountUtils {
 	);
 	
 	
+	/**
+	 * This encapsulates a WellKnown windows SID. 
+	 * 
+	 */
+	public static class WellKnownSidInfo {
+
+		WellKnownSidInfo(boolean isUserSID, String addr, String realmName, String loginName, String description) {
+			this.realmAddr = addr;
+			this.isUserSID = isUserSID;
+			this.realmName = realmName;
+			this.loginName =  this.isUserSID ? loginName : "";
+			this.description = description;
+		}
+		
+		private final String realmAddr;		// realm identifier - S-1-5-18
+		private final boolean isUserSID;	// is this a realm SID or a user SID
+		private final String realmName;		// realm name 
+		private final String loginName;		// user login name, may be empty
+		private final String description;	// description 
+
+		public String getRealmAddr() {
+			return realmAddr;
+		}
+
+		public boolean isIsUserSID() {
+			return isUserSID;
+		}
+
+		public String getRealmName() {
+			return realmName;
+		}
+
+		public String getLoginName() {
+			return loginName;
+		}
+
+		public String getDescription() {
+			return description;
+		}
+		
+		
+	}
 	
-	// Some windows SID indicate special account.
-	// These should be handled differently from regular user accounts.
-	private static final Set<String> SPECIAL_SIDS = ImmutableSet.of(
-			"S-1-5-18",	// LOCAL_SYSTEM_ACCOUNT
-			"S-1-5-19", // LOCAL_SERVICE_ACCOUNT
-			"S-1-5-20" // NETWORK_SERVICE_ACCOUNT
-	);
-	private static final Set<String> SPECIAL_SID_PREFIXES = ImmutableSet.of(
-			"S-1-5-80",	// Virtual Service accounts
-			"S-1-5-82", // AppPoolIdentity Virtual accounts. 
-			"S-1-5-83", // Virtual Machine  Virtual Accounts.
-			"S-1-5-90", // Windows Manager Virtual Accounts. 
-			"S-1-5-96" // Font Drive Host Virtual Accounts.
-	);
+	// These windows SID indicate well known windows accounts.
+	// Well known SIDs and account are handled slightly differently from the regular accounts:
+	//  - We can assume and fill in SID from given account name, and vice versa.
+	//  - We map account names in foreign languages (some known set) to english names, for these well known accounts. 
+	private static final Map<String, WellKnownSidInfo> SPECIAL_SIDS_MAP =  ImmutableMap.<String, WellKnownSidInfo>builder() 
+			.put("S-1-5-17", new WellKnownSidInfo(true, "S-1-5", NTAUTHORITY_REALM_NAME, "IUSR", "IIS Default Account"))			
+			.put("S-1-5-18", new WellKnownSidInfo(true, "S-1-5", NTAUTHORITY_REALM_NAME, "SYSTEM", "Local System Account"))
+			.put("S-1-5-19", new WellKnownSidInfo(true, "S-1-5", NTAUTHORITY_REALM_NAME, "LOCAL SERVICE", "Local Service Account"))
+			.put("S-1-5-20", new WellKnownSidInfo(true, "S-1-5", NTAUTHORITY_REALM_NAME, "NETWORK SERVICE", "Network Service Account"))
+			.build();
+		
+
+	// These SID prefixes indicate well known windows accounts.
+	//  - We can fill in the login names for these SID, as well as account user description.
+	private static final Map<String, WellKnownSidInfo> SPECIAL_SID_PREFIXES_MAP = ImmutableMap.<String, WellKnownSidInfo>builder() 
+			.put("S-1-5-80", new WellKnownSidInfo(false, "S-1-5-80", "NT SERVICE", "", "NT Service Virtual Account"))
+			.put("S-1-5-82", new WellKnownSidInfo(false, "S-1-5-82", "IIS APPPOOL", "", "IIS AppPool Virtual Account"))
+			.put("S-1-5-83", new WellKnownSidInfo(false, "S-1-5-83", "NT VIRTUAL MACHINE", "", "Virtual Machine Virtual Account") )
+			.put("S-1-5-90", new WellKnownSidInfo(false, "S-1-5-90", "Window Manager", "", "Windows Manager Virtual Account"))
+			.put("S-1-5-94", new WellKnownSidInfo(false, "S-1-5-94", "WinRM Virtual Users", "", "Windows Remoting Virtual Account"))
+			.put("S-1-5-96",  new WellKnownSidInfo(false, "S-1-5-96", "Font Driver Host", "", "Font Driver Host Virtual Account"))
+			.build();
+			
 	
+	// Looks for security identifier prefixes of the form S-<number>-<number>-<number>
+	// More information on security identifier architecture can be found at: 
+	// https://docs.microsoft.com/en-us/windows/security/identity-protection/access-control/security-identifiers
+	// A number of accounts in the range S-1-5-80-* to S-1-5-111-* are special. 
+	private static final Pattern WINDOWS_SPECIAL_ACCOUNT_PREFIX_REGEX = Pattern.compile("^[sS]\\-1\\-5\\-(\\d+)\\-");
+	
+			
+	// This map reverse-maps some of the well known account names (realm name & login name) to their well known SIDs. 
+	private static final Table<String, String, String> SPECIAL_ACCOUNTS_TO_SID_MAP = HashBasedTable.create();
+	static {
+		SPECIAL_ACCOUNTS_TO_SID_MAP.put(NTAUTHORITY_REALM_NAME, "SYSTEM", "S-1-5-18");
+		SPECIAL_ACCOUNTS_TO_SID_MAP.put(NTAUTHORITY_REALM_NAME, "LOCAL SERVICE", "S-1-5-19");
+		SPECIAL_ACCOUNTS_TO_SID_MAP.put(NTAUTHORITY_REALM_NAME, "NETWORK SERVICE", "S-1-5-20");
+	}
+	
+	// A mapping of various well known realm names to their English names.  
+	// We store only english names in the database for well known SIDs.  
+	// Input names provided by client are first mapped to english before lookup or insert. 
+	private static final Map<String, String> REALM_NAME_TO_ENGLISH_MAP =  ImmutableMap.<String, String>builder() 
+			.put("NT AUTHORITY", NTAUTHORITY_REALM_NAME)	// to facilitate a quick hit on the english name
+			.put("NT-AUTORITÄT", NTAUTHORITY_REALM_NAME)
+			.put("AUTORITE NT", NTAUTHORITY_REALM_NAME)
+			.put("NT INSTANS", NTAUTHORITY_REALM_NAME)
+			.build();
+
+	// A mapping of various well known login names to their English names.  
+	// We store only english names in the database for well known SIDs.  
+	// Input names provided by client are first mapped to english before lookup or insert. 
+	private static final Map<String, String> LOGINNAME_TO_ENGLISH_MAP =  ImmutableMap.<String, String>builder() 
+			.put("SYSTEM", "SYSTEM")	// to facilitate a quick hit on the english name
+			.put("SYSTÈME", "SYSTEM")
+			
+			.put("LOCAL SERVICE", "LOCAL SERVICE")
+			.put("LOKALER DIENST", "LOCAL SERVICE")
+			.put("SERVICE LOCAL", "LOCAL SERVICE")
+			.put("SERVIZIO LOCALE", "LOCAL SERVICE")
+			.put("SERVICIO LOC", "LOCAL SERVICE")
+			
+			.put("NETWORK SERVICE", "NETWORK SERVICE")
+			.put("NETZWERKDIENST", "NETWORK SERVICE")
+			.put("NÄTVERKSTJÄNST", "NETWORK SERVICE")
+			.put("SERVICE RÉSEAU", "NETWORK SERVICE")
+			.put("SERVIZIO DI RETE", "NETWORK SERVICE")
+			.put("SERVICIO DE RED", "NETWORK SERVICE")
+			.build();
 	
 	/**
-	 * Checks if the given SID is a special Windows SID.
+	 * Checks if the given SID is a well known Windows SID.
 	 * 
 	 * @param sid SID to check.
 	 * 
-	 * @return True if the SID is a Windows special SID, false otherwise 
+	 * @return True if the SID is a Windows well known SID, false otherwise 
 	 */
-	static boolean isWindowsSpecialSid(String sid) {
-		String tempSID = stripWindowsBackupPostfix(sid);
+	static boolean isWindowsWellKnownSid(String sid) {
 		
-		if (SPECIAL_SIDS.contains(tempSID)) {
+		String tempSID = stripWindowsBackupPostfix(sid);
+		if (SPECIAL_SIDS_MAP.containsKey(tempSID)) {
 			return true;
 		}
-		for (String specialPrefix: SPECIAL_SID_PREFIXES) {
+		for (String specialPrefix: SPECIAL_SID_PREFIXES_MAP.keySet()) {
 			if (tempSID.startsWith(specialPrefix)) {
 				return true;
 			}
 		}
+		
+		Matcher match = WINDOWS_SPECIAL_ACCOUNT_PREFIX_REGEX.matcher(tempSID);
+		if (match.find()) {
+			Integer domainIdentifier = Integer.valueOf(match.group(1));
+			// All the prefixes in the range S-1-5-80 to S-1-5-111 are special
+			if (domainIdentifier != null && domainIdentifier >= 80 && domainIdentifier <= 111) {
+				return true;
+			}
+		}
+		
 		return false;
 	}
 	
+	/**
+	 * Checks if the given realmName/loginName is a well known account.
+	 * 
+	 * @param loginName Login name to check.
+	 * @param realmName Realm name to check.
+	 * @return True if the account is a well known Windows account, false otherwise 
+	 */
+	static boolean isWindowsWellKnownAccountName(String loginName, String realmName) {
+		
+		String resolvedRealmName = toWellknownEnglishRealmName(realmName);
+		String resolvedLoginName = toWellknownEnglishLoginName(loginName);
+		if (StringUtils.isBlank(resolvedRealmName) ||  StringUtils.isBlank(resolvedLoginName)) {
+			return false;
+		}
+		
+		return SPECIAL_ACCOUNTS_TO_SID_MAP.contains(resolvedRealmName.toUpperCase(), resolvedLoginName.toUpperCase());
+		
+	}
+	
+	/**
+	 * Get the realm address for the given well known Windows SID.
+	 * 
+	 * @param sid SID to check.
+	 * @return Realm address for the Windows well known SID, an empty string if the SID is not a known special SID. 
+	 * 
+	 * @throws TskCoreException 
+	 */
+	private static String getWindowsWellKnownSidRealmAddr(String sid) throws TskCoreException {
+		String tempSID = stripWindowsBackupPostfix(sid);
+
+		if (SPECIAL_SIDS_MAP.containsKey(tempSID)) {
+			return SPECIAL_SIDS_MAP.get(tempSID).getRealmAddr();
+		}
+		
+		for (Entry<String, WellKnownSidInfo> specialPrefixEntry : SPECIAL_SID_PREFIXES_MAP.entrySet()) {
+			if (tempSID.startsWith(specialPrefixEntry.getKey())) {
+				return specialPrefixEntry.getValue().getRealmAddr();
+			}
+		}
+
+		Matcher match = WINDOWS_SPECIAL_ACCOUNT_PREFIX_REGEX.matcher(tempSID);
+		if (match.find()) {
+			Integer domainIdentifier = Integer.valueOf(match.group(1));
+			// All the prefixes in the range S-1-5-80 to S-1-5-111 are special
+			if (domainIdentifier != null && domainIdentifier >= 80 && domainIdentifier <= 111) {
+				String realmAddr = String.format("%s-%d", NTAUTHORITY_SID_PREFIX, domainIdentifier);
+				return realmAddr;
+			}
+		}
+		
+		return "";
+	}
+	/**
+	 * Get the well known SID info for the given SID. 
+	 * 
+	 * @param sid SID to check.
+	 * 
+	 * @return WellKnownSidInfo for the SID, null if there is no info available. 
+	 */
+	private static WellKnownSidInfo getWindowsWellKnownInfo(String sid) {
+		String tempSID = stripWindowsBackupPostfix(sid);
+		
+		if (SPECIAL_SIDS_MAP.containsKey(tempSID)) {
+			return SPECIAL_SIDS_MAP.get(tempSID);
+		}
+		for (Entry<String, WellKnownSidInfo> specialPrefixEntry: SPECIAL_SID_PREFIXES_MAP.entrySet()) {
+			if (tempSID.startsWith(specialPrefixEntry.getKey())) {
+				return specialPrefixEntry.getValue();
+			}
+		}
+		return null;
+	}
+	
+	/**
+	 * Get the full name (description) for the given well known Windows SID.
+	 * 
+	 * @param sid SID to check.
+	 * 
+	 * @return Full name for the Windows well known SID, an empty string if the SID is not a known well known SID. 
+	 */
+	static String getWindowsWellKnownSidFullName(String sid) {
+		WellKnownSidInfo wellKnownSidInfo = getWindowsWellKnownInfo(sid);
+		return Objects.nonNull(wellKnownSidInfo) ? wellKnownSidInfo.getDescription() : "";
+	}
 	
+	/**
+	 * Get the realm name for the given well known Windows SID.
+	 * 
+	 * @param sid SID to check.
+	 * 
+	 * @return Realm Name for Windows special SID, NULL if the SID is not a known special SID. 
+	 */
+	static String getWindowsWellKnownSidRealmName(String sid) {
+		
+		if (StringUtils.isNotBlank(sid) && sid.equals(NTAUTHORITY_SID_PREFIX)) {
+			return NTAUTHORITY_REALM_NAME;
+		}
+		
+		WellKnownSidInfo wellKnownSidInfo = getWindowsWellKnownInfo(sid);
+		return Objects.nonNull(wellKnownSidInfo) 
+				? wellKnownSidInfo.getRealmName() 
+				: null;
+	}
+	
+	/**
+	 * Get the login name for the given well known Windows SID.
+	 * 
+	 * @param sid SID to check.
+	 * 
+	 * @return Login Name for Windows special SID, NULL if the SID is not a known special SID. 
+	 */
+	static String getWindowsWellKnownSidLoginName(String sid) {
+		
+		WellKnownSidInfo wellKnownSidInfo = getWindowsWellKnownInfo(sid);
+		return Objects.nonNull(wellKnownSidInfo) 
+				? wellKnownSidInfo.getLoginName()
+				: null;
+	}
+	
+	
+	/**
+	 * Returns the SID for a well known account name.
+	 * 
+	 * @param loginName Well known login name.
+	 * @param realmName Well known realm name. 
+	 * 
+	 * @return SID corresponding to the well known account name, NULL if its not known. 
+	 */
+	static String getWindowsWellKnownAccountSid( String loginName, String realmName) {
+		
+		String resolvedRealmName = toWellknownEnglishRealmName(realmName);
+		String resolvedLoginName = toWellknownEnglishLoginName(loginName);
+		if (StringUtils.isBlank(resolvedRealmName) ||  StringUtils.isBlank(resolvedLoginName)) {
+			return null;
+		}
+		
+		return SPECIAL_ACCOUNTS_TO_SID_MAP.get(resolvedRealmName.toUpperCase(), resolvedLoginName.toUpperCase());
+		
+	}
+	
+	/**
+	 * Returns english name for a given well known realm name.
+	 *
+	 * @param name Realm name to translate.
+	 *
+	 * @return English realm name corresponding to given realm name, NULL if
+	 *         realm name is not known.
+	 */
+	static String toWellknownEnglishRealmName(String name) {
+		return StringUtils.isNotBlank(name)
+				? REALM_NAME_TO_ENGLISH_MAP.getOrDefault(name.toUpperCase(), name)
+				: null;
+	}
+
+	/**
+	 * Returns english name for the given well known login name.
+	 *
+	 * @param name Login name to translate.
+	 *
+	 * @return English login name corresponding to given login name. NULL if
+	 *         login name is not known.
+	 */
+	static String toWellknownEnglishLoginName(String name) {
+		return StringUtils.isNotBlank(name)
+				? LOGINNAME_TO_ENGLISH_MAP.getOrDefault(name.toUpperCase(), name)
+				: null;
+	}
+		
 	/**
 	 * Checks if the given SID is a user SID.
 	 * 
@@ -166,7 +458,7 @@ static boolean isWindowsUserSid(String sid) {
 		}
 		
 		// check for domain groups - they have a domains specific identifier but have a fixed prefix and suffix
-		if (tempSID.startsWith(DOMAIN_SID_PREFIX)) {
+		if (tempSID.startsWith(NTAUTHORITY_SID_PREFIX)) {
 			for (String suffix : DOMAIN_GROUP_SID_SUFFIX) {
 				if (tempSID.endsWith(suffix)) {
 					return false;
@@ -180,15 +472,15 @@ static boolean isWindowsUserSid(String sid) {
 	
 	/**
 	 * Get the windows realm address from the given SID.
-	 * 
+	 *
 	 * For all regular account SIDs, the realm address is the sub-authority SID.
-	 * For special Windows account the realm address is a special address, 
-	 * SPECIAL_WINDOWS_REALM_ADDR { @link WindowsAccountUtils.SPECIAL_WINDOWS_REALM_ADDR}
-	 * 
+	 * For some well known accounts, the realm address is returned from a
+	 * predetermined list.
+	 *
 	 * @param sid SID
-	 * 
+	 *
 	 * @return Realm address for the SID.
-	 * 
+	 *
 	 * @throws TskCoreException If the given SID is not a valid host/domain SID.
 	 */
 	public static String getWindowsRealmAddress(String sid) throws TskCoreException {
@@ -196,12 +488,13 @@ public static String getWindowsRealmAddress(String sid) throws TskCoreException
 		String realmAddr;
 		String tempSID = stripWindowsBackupPostfix(sid);
 		
-		// When copying realms into portable cases, the SID may already be set to the special windows string.
-		if (isWindowsSpecialSid(tempSID) || tempSID.equals(SPECIAL_WINDOWS_REALM_ADDR)) {
-			realmAddr = SPECIAL_WINDOWS_REALM_ADDR;
+		if ( isWindowsWellKnownSid(tempSID)) {
+			realmAddr = getWindowsWellKnownSidRealmAddr(sid);
 		} else {
-			// regular SIDs should have at least 5 components: S-1-x-y-z
-			if (org.apache.commons.lang3.StringUtils.countMatches(tempSID, "-") < 4) {
+			// SIDs should have at least 4 components: S-1-A-S
+			// A: authority identifier
+			// S: one or more sub-authority identifiers (RIDs)
+			if (org.apache.commons.lang3.StringUtils.countMatches(tempSID, "-") < 3) {
 				throw new TskCoreException(String.format("Invalid SID %s for a host/domain", tempSID));
 			}
 			// get the sub authority SID
diff --git a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactHelperBase.java b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactHelperBase.java
index 426061af68d7aa82bbbd60bac035d7ab2eeda32e..06e8a9a9b8f2e9a2f207e5235def7bf20da613ee 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactHelperBase.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactHelperBase.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2019-2020 Basis Technology Corp.
+ * Copyright 2019-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,69 +19,83 @@
 package org.sleuthkit.datamodel.blackboardutils;
 
 import java.util.Collection;
+import java.util.Optional;
 import org.apache.commons.lang3.StringUtils;
 import org.sleuthkit.datamodel.BlackboardAttribute;
 import org.sleuthkit.datamodel.Content;
 import org.sleuthkit.datamodel.SleuthkitCase;
 
 /**
- * A base class for classes that help ingest modules create artifacts.
- *
+ * A super class for classes that help modules create artifacts.
  */
 class ArtifactHelperBase {
 
 	private final SleuthkitCase caseDb;
-	private final Content srcContent;		// artifact source
-	private final String moduleName;		// module creating the artifacts
+	private final Content srcContent;
+	private final String moduleName;
+	private final Long ingestJobId;
 
 	/**
-	 * Creates an artifact helper.
+	 * Constructs the super class part of an artifact helper.
 	 *
-	 * @param caseDb     Sleuthkit case db
-	 * @param moduleName name module using the helper
-	 * @param srcContent source content
+	 * @param caseDb      The case database.
+	 * @param moduleName  The name of the module creating the artifacts.
+	 * @param srcContent  The source/parent content of the artifacts.
+	 * @param ingestJobId The numeric identifier of the ingest job within which
+	 *                    the artifacts are being created, may be null.
 	 */
-	ArtifactHelperBase(SleuthkitCase caseDb, String moduleName, Content srcContent) {
+	ArtifactHelperBase(SleuthkitCase caseDb, String moduleName, Content srcContent, Long ingestJobId) {
 		this.moduleName = moduleName;
 		this.srcContent = srcContent;
 		this.caseDb = caseDb;
+		this.ingestJobId = ingestJobId;
 	}
 
 	/**
-	 * Returns the source content.
+	 * Get the source/parent content of the artifacts.
 	 *
-	 * @return Source content.
+	 * @return The content.
 	 */
 	Content getContent() {
-		return this.srcContent;
+		return srcContent;
 	}
 
 	/**
-	 * Returns the sleuthkit case.
+	 * Gets the case database.
 	 *
-	 * @return Sleuthkit case database.
+	 * @return The case database.
 	 */
 	SleuthkitCase getSleuthkitCase() {
-		return this.caseDb;
+		return caseDb;
 	}
 
 	/**
-	 * Returns module name.
+	 * Gets the name of the module creating the artifacts.
 	 *
-	 * @return Module name.
+	 * @return The module name.
 	 */
 	String getModuleName() {
-		return this.moduleName;
+		return moduleName;
 	}
 
 	/**
-	 * Creates and adds a string attribute of specified type to the given list, if the
-	 * attribute value is not empty or null.
+	 * Gets the numeric identifier of the ingest job within which the artifacts
+	 * are being created.
 	 *
-	 * @param attributeType Attribute type.
-	 * @param attrValue     String attribute value.
-	 * @param attributes    List of attributes to add to.
+	 * @return The ingest job ID, may be null
+	 */
+	Optional<Long> getIngestJobId() {
+		return Optional.ofNullable(ingestJobId);
+	}
+
+	/**
+	 * Creates an attribute of a specified type with a string value and adds it
+	 * to a given list of attributes.
 	 *
+	 * @param attributeType The attribute type.
+	 * @param attrValue     The attribute value; the attribute is not added if
+	 *                      the value is null or the empty string.
+	 * @param attributes    The list of attributes.
 	 */
 	void addAttributeIfNotNull(BlackboardAttribute.ATTRIBUTE_TYPE attributeType, String attrValue, Collection<BlackboardAttribute> attributes) {
 		if (!StringUtils.isEmpty(attrValue)) {
@@ -90,30 +104,32 @@ void addAttributeIfNotNull(BlackboardAttribute.ATTRIBUTE_TYPE attributeType, Str
 	}
 
 	/**
-	 * Creates and adds a long attribute of specified type to the given list, if the
-	 * attribute value is not 0.
+	 * Creates an attribute of a specified type with a long value and adds it to
+	 * a given list of attributes.
 	 *
-	 * @param attributeType Attribute type.
-	 * @param attrValue     Long attribute value.
-	 * @param attributes    List of attributes to add to.
+	 * @param attributeType The attribute type.
+	 * @param attrValue     The attribute value; the attribute is not added
+	 *                      unless the value is greater than zero.
+	 * @param attributes    The list of attributes.
 	 */
 	void addAttributeIfNotZero(BlackboardAttribute.ATTRIBUTE_TYPE attributeType, long attrValue, Collection<BlackboardAttribute> attributes) {
 		if (attrValue > 0) {
 			attributes.add(new BlackboardAttribute(attributeType, getModuleName(), attrValue));
 		}
 	}
-	
+
 	/**
-	 * Creates and adds an integer attribute of specified type to the given list, if the
-	 * attribute value is not 0.
+	 * Creates an attribute of a specified type with an integer value and adds
+	 * it to a given list of attributes.
 	 *
-	 * @param attributeType Attribute type.
-	 * @param attrValue     Integer attribute value.
-	 * @param attributes    List of attributes to add to.
+	 * @param attributeType The attribute type.
+	 * @param attrValue     The attribute value; the attribute is not added
+	 *                      unless the value is greater than zero.
+	 * @param attributes    The list of attributes to which the new attribute
+	 *                      will be added.
 	 */
 	void addAttributeIfNotZero(BlackboardAttribute.ATTRIBUTE_TYPE attributeType, int attrValue, Collection<BlackboardAttribute> attributes) {
 		if (attrValue > 0) {
 			attributes.add(new BlackboardAttribute(attributeType, getModuleName(), attrValue));
 		}
 	}
+
 }
diff --git a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactsHelper.java b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactsHelper.java
index dbd5eee7fb219d3f87d5f7db1e8c05c1e9dd4b18..ec41f6f73645d52e1644e19ed79b727773fcc634 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactsHelper.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/ArtifactsHelper.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2019 Basis Technology Corp.
+ * Copyright 2019-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,33 +22,49 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Optional;
-import org.sleuthkit.datamodel.AbstractFile;
 import org.sleuthkit.datamodel.Blackboard.BlackboardException;
 import org.sleuthkit.datamodel.BlackboardArtifact;
 import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
 import org.sleuthkit.datamodel.BlackboardAttribute;
 import org.sleuthkit.datamodel.Content;
-import org.sleuthkit.datamodel.OsAccount;
 import org.sleuthkit.datamodel.SleuthkitCase;
 import org.sleuthkit.datamodel.TskCoreException;
 
 /**
- * This class helps ingest modules create miscellaneous artifacts.
- *
+ * A class that helps modules to create various types of artifacts.
  */
 public final class ArtifactsHelper extends ArtifactHelperBase {
+
 	private static final BlackboardArtifact.Type INSTALLED_PROG_TYPE = new BlackboardArtifact.Type(ARTIFACT_TYPE.TSK_INSTALLED_PROG);
-	
+
+	/**
+	 * Constructs an instance of a class that helps modules to create various
+	 * types of artifacts.
+	 *
+	 * @param caseDb      The case database.
+	 * @param moduleName  The name of the module creating the artifacts.
+	 * @param srcContent  The source/parent content of the artifacts.
+	 * @param ingestJobId The numeric identifier of the ingest job within which
+	 *                    the artifacts are being created, may be null.
+	 */
+	public ArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Long ingestJobId) {
+		super(caseDb, moduleName, srcContent, ingestJobId);
+	}
+
 	/**
-	 * Creates an artifact helper for modules to create artifacts.
+	 * Constructs an instance of a class that helps modules to create various
+	 * types of artifacts.
 	 *
-	 * @param caseDb     Sleuthkit case database.
-	 * @param moduleName Name of module using the helper.
-	 * @param srcContent Source content for the artifacts.
+	 * @param caseDb     The case database.
+	 * @param moduleName The name of the module creating the artifacts.
+	 * @param srcContent The source/parent content of the artifacts.
 	 *
+	 * @deprecated Use ArtifactsHelper(SleuthkitCase caseDb, String moduleName,
+	 * Content srcContent, Long ingestJobId) instead.
 	 */
+	@Deprecated
 	public ArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent) {
-		super(caseDb, moduleName, srcContent);
+		this(caseDb, moduleName, srcContent, null);
 	}
 
 	/**
@@ -59,7 +75,7 @@ public ArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcConte
 	 *
 	 * @return Installed program artifact added.
 	 *
-	 * @throws TskCoreException		If there is an error creating the artifact.
+	 * @throws TskCoreException		  If there is an error creating the artifact.
 	 * @throws BlackboardException	If there is a problem posting the artifact.
 	 */
 	public BlackboardArtifact addInstalledProgram(String programName, long dateInstalled) throws TskCoreException, BlackboardException {
@@ -78,14 +94,14 @@ public BlackboardArtifact addInstalledProgram(String programName, long dateInsta
 	 *
 	 * @return Installed program artifact added.
 	 *
-	 * @throws TskCoreException		If there is an error creating the artifact.
+	 * @throws TskCoreException		  If there is an error creating the artifact.
 	 * @throws BlackboardException	If there is a problem posting the artifact.
 	 */
 	public BlackboardArtifact addInstalledProgram(String programName, long dateInstalled,
 			Collection<BlackboardAttribute> otherAttributesList) throws TskCoreException, BlackboardException {
 
 		Collection<BlackboardAttribute> attributes = new ArrayList<>();
-		
+
 		// construct attributes 
 		attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME, getModuleName(), programName));
 		addAttributeIfNotZero(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME, dateInstalled, attributes);
@@ -96,9 +112,10 @@ public BlackboardArtifact addInstalledProgram(String programName, long dateInsta
 		// create artifact
 		Content content = getContent();
 		BlackboardArtifact installedProgramArtifact = content.newDataArtifact(INSTALLED_PROG_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(installedProgramArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(installedProgramArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return installedProgramArtifact;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/CommunicationArtifactsHelper.java b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/CommunicationArtifactsHelper.java
index 374754febb7521522d42dbac337b14b9b4a75a73..b73728217c93e6825fd8a0435819d8d5ca49d93c 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/CommunicationArtifactsHelper.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/CommunicationArtifactsHelper.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2019-2020 Basis Technology Corp.
+ * Copyright 2019-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,9 +36,9 @@
 import org.sleuthkit.datamodel.BlackboardArtifact;
 import org.sleuthkit.datamodel.BlackboardAttribute;
 import org.sleuthkit.datamodel.Content;
+import org.sleuthkit.datamodel.DataArtifact;
 import org.sleuthkit.datamodel.DataSource;
 import org.sleuthkit.datamodel.InvalidAccountIDException;
-import org.sleuthkit.datamodel.OsAccount;
 import org.sleuthkit.datamodel.Relationship;
 import org.sleuthkit.datamodel.SleuthkitCase;
 import org.sleuthkit.datamodel.TskCoreException;
@@ -48,24 +48,23 @@
 import org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments.FileAttachment;
 
 /**
- * Class to help ingest modules create communication artifacts. Communication
- * artifacts includes contacts, messages, call logs.
+ * A class that helps modules to create communication artifacts: contacts,
+ * messages, and call logs.
  *
  * It creates a 'self' account {@link Account} - an account for the owner/user
  * of the application being processed by the module. As an example, for a module
- * analyzing Facebook application, this would be account associated with the
- * unique Facebook user id of the device owner.
+ * analyzing Facebook application, this would be the account associated with the
+ * unique Facebook user ID of the device owner.
  *
- * In the absence of a 'self' account, a 'device' account may be used in it's
+ * In the absence of a 'self' account, a 'device' account may be used in its
  * place. A 'device' account is an account meant to represent the owner of the
- * device and uses the unique device id as the unique account identifier.
+ * device, and uses the unique device ID as the unique account identifier.
  *
  * It also creates accounts for contacts, and sender/receivers of the messages,
  * and calls.
  *
- * And it also creates relationships between the self account - and the contacts
- * and sender/receiver accounts.
- *
+ * And it creates relationships between the self account and the contacts and
+ * sender/receiver accounts.
  */
 public final class CommunicationArtifactsHelper extends ArtifactHelperBase {
 
@@ -146,60 +145,110 @@ public String getDisplayName() {
 	private final Account.Type moduleAccountsType;
 
 	/**
-	 * Constructs a communications artifacts helper for the given source file.
-	 *
-	 * This is a constructor for modules that do not have a 'self' account, and
-	 * will use a 'Device' account in lieu.
+	 * Constructs an instance of a class that helps modules to create
+	 * communication artifacts: contacts, messages, and call logs.
 	 *
-	 * It creates a DeviceAccount instance to use as a self account.
+	 * This constructor is intended to be used when there is no known
+	 * application account and a device account should be used instead.
 	 *
-	 * @param caseDb       Sleuthkit case db.
-	 * @param moduleName   Name of module using the helper.
-	 * @param srcContent   Source content being processed by the module.
+	 * @param caseDb       The case database.
+	 * @param moduleName   The name of the module creating the artifacts.
+	 * @param srcContent   The source/parent content of the artifacts.
 	 * @param accountsType Account type {@link Account.Type} created by this
 	 *                     module.
+	 * @param ingestJobId  The numeric identifier of the ingest job within which
+	 *                     the artifacts are being created, may be null.
 	 *
-	 * @throws TskCoreException If there is an error creating the device
-	 *                          account.
+	 * @throws TskCoreException The exception is thrown if there is an error
+	 *                          querying the case database.
 	 */
-	public CommunicationArtifactsHelper(SleuthkitCase caseDb,
-			String moduleName, Content srcContent, Account.Type accountsType) throws TskCoreException {
-
-		super(caseDb, moduleName, srcContent);
-
+	public CommunicationArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Account.Type accountsType, Long ingestJobId) throws TskCoreException {
+		super(caseDb, moduleName, srcContent, ingestJobId);
 		this.moduleAccountsType = accountsType;
 		this.selfAccountType = Account.Type.DEVICE;
 		this.selfAccountId = ((DataSource) getContent().getDataSource()).getDeviceId();
 	}
 
 	/**
-	 * Constructs a communications artifacts helper for the given source file.
-	 *
-	 * This constructor is for modules that have the application specific
-	 * account information for the device owner to create a 'self' account.
+	 * Constructs an instance of a class that helps modules to create
+	 * communication artifacts: contacts, messages, and call logs.
 	 *
-	 * It creates an account instance with specified type & id, and uses it as
-	 * the self account.
+	 * This constructor is intended to be used when there is sufficient
+	 * application-specific account information about the device owner to create
+	 * a 'self' account.
 	 *
-	 * @param caseDb          Sleuthkit case db.
-	 * @param moduleName      Name of module using the helper.
-	 * @param srcContent      Source content being processed by the module.
+	 * @param caseDb          The case database.
+	 * @param moduleName      The name of the module creating the artifacts.
+	 * @param srcContent      The source/parent content of the artifacts.
 	 * @param accountsType    Account type {@link Account.Type} created by this
 	 *                        module.
 	 * @param selfAccountType Self account type to be created for this module.
 	 * @param selfAccountId	  Account unique id for the self account.
+	 * @param ingestJobId     The numeric identifier of the ingest job within
+	 *                        which the artifacts are being created, may be
+	 *                        null.
 	 *
-	 * @throws TskCoreException	If there is an error creating the self account
+	 * @throws TskCoreException The exception is thrown if there is an error
+	 *                          querying the case database.
 	 */
-	public CommunicationArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Account.Type accountsType, Account.Type selfAccountType, String selfAccountId) throws TskCoreException {
-
-		super(caseDb, moduleName, srcContent);
-
+	public CommunicationArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Account.Type accountsType, Account.Type selfAccountType, String selfAccountId, Long ingestJobId) throws TskCoreException {
+		super(caseDb, moduleName, srcContent, ingestJobId);
 		this.moduleAccountsType = accountsType;
 		this.selfAccountType = selfAccountType;
 		this.selfAccountId = selfAccountId;
 	}
 
+	/**
+	 * Constructs an instance of a class that helps modules to create
+	 * communication artifacts: contacts, messages, and call logs.
+	 *
+	 * This constructor is intended to be used when there is no known
+	 * application account and a device account should be used instead.
+	 *
+	 * @param caseDb       The case database.
+	 * @param moduleName   The name of the module creating the artifacts.
+	 * @param srcContent   The source/parent content of the artifacts.
+	 * @param accountsType Account type {@link Account.Type} created by this
+	 *                     module.
+	 *
+	 * @throws TskCoreException The exception is thrown if there is an error
+	 *                          updating the case database.
+	 * @deprecated Use CommunicationArtifactsHelper(SleuthkitCase caseDb, String
+	 * moduleName, Content srcContent, Account.Type accountsType, Long
+	 * ingestJobId) instead.
+	 */
+	@Deprecated
+	public CommunicationArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Account.Type accountsType) throws TskCoreException {
+		this(caseDb, moduleName, srcContent, accountsType, null);
+	}
+
+	/**
+	 * Constructs an instance of a class that helps modules to create
+	 * communication artifacts: contacts, messages, and call logs.
+	 *
+	 * This constructor is intended to be used when there is sufficient
+	 * application-specific account information about the device owner to create
+	 * a 'self' account.
+	 *
+	 * @param caseDb          The case database.
+	 * @param moduleName      The name of the module creating the artifacts.
+	 * @param srcContent      The source/parent content of the artifacts.
+	 * @param accountsType    Account type {@link Account.Type} created by this
+	 *                        module.
+	 * @param selfAccountType Self account type to be created for this module.
+	 * @param selfAccountId	  Account unique id for the self account.
+	 *
+	 * @throws TskCoreException The exception is thrown if there is an error
+	 *                          updating the case database.
+	 * @deprecated Use CommunicationArtifactsHelper(SleuthkitCase caseDb, String
+	 * moduleName, Content srcContent, Account.Type accountsType, Account.Type
+	 * selfAccountType, String selfAccountId, Long ingestJobId) instead.
+	 */
+	@Deprecated
+	public CommunicationArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Account.Type accountsType, Account.Type selfAccountType, String selfAccountId) throws TskCoreException {
+		this(caseDb, moduleName, srcContent, accountsType, selfAccountType, selfAccountId, null);
+	}
+
 	/**
 	 * Creates and adds a TSK_CONTACT artifact to the case, with specified
 	 * attributes. Also creates an account instance of specified type for the
@@ -317,7 +366,8 @@ public BlackboardArtifact addContact(String contactName,
 		}
 
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(contactArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(contactArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		return contactArtifact;
 	}
@@ -367,7 +417,8 @@ private void createContactMethodAccountAndRelationship(Account.Type accountType,
 	 *                          instance.
 	 */
 	private AccountFileInstance createAccountInstance(Account.Type accountType, String accountUniqueID) throws TskCoreException, InvalidAccountIDException {
-		return getSleuthkitCase().getCommunicationsManager().createAccountFileInstance(accountType, accountUniqueID, getModuleName(), getContent());
+		Optional<Long> ingestJobId = getIngestJobId();
+		return getSleuthkitCase().getCommunicationsManager().createAccountFileInstance(accountType, accountUniqueID, getModuleName(), getContent(), null, ingestJobId.orElse(null));
 	}
 
 	/**
@@ -545,7 +596,7 @@ public BlackboardArtifact addMessage(String messageType,
 					try {
 						recipientAccountsList.add(createAccountInstance(moduleAccountsType, recipient));
 					} catch (InvalidAccountIDException ex) {
-						LOGGER.log(Level.WARNING, String.format("Invalid account identifier %s", senderId));
+						LOGGER.log(Level.WARNING, String.format("Invalid account identifier %s", recipient));
 					}
 				}
 			}
@@ -619,7 +670,8 @@ public BlackboardArtifact addMessage(String messageType,
 		}
 
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(msgArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(msgArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return msgArtifact;
@@ -794,7 +846,7 @@ public BlackboardArtifact addCalllog(CommunicationDirection direction,
 					try {
 						recipientAccountsList.add(createAccountInstance(moduleAccountsType, callee));
 					} catch (InvalidAccountIDException ex) {
-						LOGGER.log(Level.WARNING, String.format("Failed to create account with id %s", callerId));
+						LOGGER.log(Level.WARNING, String.format("Failed to create account with id %s", callee));
 					}
 				}
 			}
@@ -863,7 +915,8 @@ public BlackboardArtifact addCalllog(CommunicationDirection direction,
 		}
 
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(callLogArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(callLogArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return callLogArtifact;
@@ -883,14 +936,23 @@ public void addAttachments(BlackboardArtifact message, MessageAttachments attach
 		message.addAttribute(blackboardAttribute);
 
 		// Associate each attachment file with the message.
+		List<BlackboardArtifact> assocObjectArtifacts = new ArrayList<>();
 		Collection<FileAttachment> fileAttachments = attachments.getFileAttachments();
 		for (FileAttachment fileAttachment : fileAttachments) {
 			long attachedFileObjId = fileAttachment.getObjectId();
 			if (attachedFileObjId >= 0) {
 				AbstractFile attachedFile = message.getSleuthkitCase().getAbstractFileById(attachedFileObjId);
-				associateAttachmentWithMessage(message, attachedFile);
+				DataArtifact artifact = associateAttachmentWithMessage(message, attachedFile);
+				assocObjectArtifacts.add(artifact);
 			}
 		}
+
+		try {
+			Optional<Long> ingestJobId = getIngestJobId();
+			getSleuthkitCase().getBlackboard().postArtifacts(assocObjectArtifacts, getModuleName(), ingestJobId.orElse(null));
+		} catch (BlackboardException ex) {
+			throw new TskCoreException("Error posting TSK_ASSOCIATED_ARTIFACT artifacts for attachments", ex);
+		}
 	}
 
 	/**
@@ -905,10 +967,9 @@ public void addAttachments(BlackboardArtifact message, MessageAttachments attach
 	 * @throws TskCoreException If there is an error creating the
 	 *                          TSK_ASSOCIATED_OBJECT artifact.
 	 */
-	private BlackboardArtifact associateAttachmentWithMessage(BlackboardArtifact message, AbstractFile attachedFile) throws TskCoreException {
+	private DataArtifact associateAttachmentWithMessage(BlackboardArtifact message, AbstractFile attachedFile) throws TskCoreException {
 		Collection<BlackboardAttribute> attributes = new ArrayList<>();
 		attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT, this.getModuleName(), message.getArtifactID()));
-
 		return attachedFile.newDataArtifact(ASSOCIATED_OBJ_TYPE, attributes);
 	}
 
@@ -984,7 +1045,8 @@ private void addMessageReadStatusIfKnown(MessageReadStatus readStatus, Collectio
 	 */
 	private synchronized AccountFileInstance getSelfAccountInstance() throws TskCoreException, InvalidAccountIDException {
 		if (selfAccountInstance == null) {
-			selfAccountInstance = getSleuthkitCase().getCommunicationsManager().createAccountFileInstance(selfAccountType, selfAccountId, this.getModuleName(), getContent());
+			Optional<Long> ingestJobId = getIngestJobId();
+			selfAccountInstance = getSleuthkitCase().getCommunicationsManager().createAccountFileInstance(selfAccountType, selfAccountId, this.getModuleName(), getContent(), null, ingestJobId.orElse(null));
 		}
 		return selfAccountInstance;
 	}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/GeoArtifactsHelper.java b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/GeoArtifactsHelper.java
index 4e51e3681cce21334c23aca8f1942fb370e4afc2..06f5f0670b0c6dd1597f9185fdc8ddb10d448c6f 100755
--- a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/GeoArtifactsHelper.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/GeoArtifactsHelper.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +20,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import org.sleuthkit.datamodel.AbstractFile;
+import java.util.Optional;
 import org.sleuthkit.datamodel.Blackboard.BlackboardException;
 import org.sleuthkit.datamodel.BlackboardArtifact;
 import org.sleuthkit.datamodel.BlackboardAttribute;
@@ -33,24 +33,23 @@
 import org.sleuthkit.datamodel.blackboardutils.attributes.GeoAreaPoints;
 
 /**
- * An artifact creation helper that adds geolocation artifacts to the case
- * database.
+ * A class that helps modules to create geolocation artifacts.
  */
 public final class GeoArtifactsHelper extends ArtifactHelperBase {
 
 	private static final BlackboardAttribute.Type WAYPOINTS_ATTR_TYPE = new BlackboardAttribute.Type(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_WAYPOINTS);
 	private static final BlackboardAttribute.Type TRACKPOINTS_ATTR_TYPE = new BlackboardAttribute.Type(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_TRACKPOINTS);
 	private static final BlackboardAttribute.Type AREAPOINTS_ATTR_TYPE = new BlackboardAttribute.Type(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_AREAPOINTS);
-	
+
 	private static final BlackboardArtifact.Type GPS_TRACK_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_TRACK);
 	private static final BlackboardArtifact.Type GPS_ROUTE_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_ROUTE);
 	private static final BlackboardArtifact.Type GPS_AREA_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_AREA);
-	
+
 	private final String programName;
 
 	/**
-	 * Constructs an artifact creation helper that adds geolocation artifacts to
-	 * the case database.
+	 * Constructs an instance of a class that helps modules to create
+	 * geolocation artifacts.
 	 *
 	 * @param caseDb      The case database.
 	 * @param moduleName  The name of the module creating the artifacts.
@@ -59,14 +58,37 @@ public final class GeoArtifactsHelper extends ArtifactHelperBase {
 	 *                    null. If a program name is supplied, it will be added
 	 *                    to each artifact that is created as a TSK_PROG_NAME
 	 *                    attribute.
-	 * @param srcContent  The source content for the artifacts, i.e., either a
-	 *                    file within a data source or a data source.
+	 * @param srcContent  The source/parent content of the artifacts.
+	 * @param ingestJobId The numeric identifier of the ingest job within which
+	 *                    the artifacts are being created, may be null.
 	 */
-	public GeoArtifactsHelper(SleuthkitCase caseDb, String moduleName, String programName, Content srcContent) {
-		super(caseDb, moduleName, srcContent);
+	public GeoArtifactsHelper(SleuthkitCase caseDb, String moduleName, String programName, Content srcContent, Long ingestJobId) {
+		super(caseDb, moduleName, srcContent, ingestJobId);
 		this.programName = programName;
 	}
 
+	/**
+	 * Constructs an instance of a class that helps modules to create
+	 * geolocation artifacts.
+	 *
+	 * @param caseDb      The case database.
+	 * @param moduleName  The name of the module creating the artifacts.
+	 * @param programName The name of the user application associated with the
+	 *                    geolocation data to be recorded as artifacts, may be
+	 *                    null. If a program name is supplied, it will be added
+	 *                    to each artifact that is created as a TSK_PROG_NAME
+	 *                    attribute.
+	 * @param srcContent  The source/parent content of the artifacts.
+	 *
+	 * @deprecated Use GeoArtifactsHelper(SleuthkitCase caseDb, String
+	 * moduleName, String programName, Content srcContent, Long ingestJobId)
+	 * instead.
+	 */
+	@Deprecated
+	public GeoArtifactsHelper(SleuthkitCase caseDb, String moduleName, String programName, Content srcContent) {
+		this(caseDb, moduleName, programName, srcContent, null);
+	}
+
 	/**
 	 * Adds a TSK_GPS_TRACK artifact to the case database. A Global Positioning
 	 * System (GPS) track artifact records the track, or path, of a GPS-enabled
@@ -114,7 +136,8 @@ public BlackboardArtifact addTrack(String trackName, GeoTrackPoints trackPoints,
 		Content content = getContent();
 		BlackboardArtifact artifact = content.newDataArtifact(GPS_TRACK_TYPE, attributes);
 
-		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName(), ingestJobId.orElse(null));
 
 		return artifact;
 	}
@@ -171,8 +194,9 @@ public BlackboardArtifact addRoute(String routeName, Long creationTime, GeoWaypo
 
 		Content content = getContent();
 		BlackboardArtifact artifact = content.newDataArtifact(GPS_ROUTE_TYPE, attributes);
-		
-		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName());
+
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName(), ingestJobId.orElse(null));
 
 		return artifact;
 	}
@@ -219,8 +243,9 @@ public BlackboardArtifact addArea(String areaName, GeoAreaPoints areaPoints, Lis
 
 		Content content = getContent();
 		BlackboardArtifact artifact = content.newDataArtifact(GPS_AREA_TYPE, attributes);
-		
-		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName());
+
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(artifact, getModuleName(), ingestJobId.orElse(null));
 
 		return artifact;
 	}
diff --git a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/WebBrowserArtifactsHelper.java b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/WebBrowserArtifactsHelper.java
index 28f9b1b21d487db5463ab3f02f71891126d22576..704f1191808ad82d01c43f17bafb946732fb6934 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/WebBrowserArtifactsHelper.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/blackboardutils/WebBrowserArtifactsHelper.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit Data Model
  *
- * Copyright 2019-2020 Basis Technology Corp.
+ * Copyright 2019-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,11 +23,11 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Optional;
 import java.util.StringTokenizer;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import org.apache.commons.lang3.StringUtils;
-import org.sleuthkit.datamodel.AbstractFile;
 import org.sleuthkit.datamodel.Account;
 import org.sleuthkit.datamodel.Blackboard.BlackboardException;
 import org.sleuthkit.datamodel.BlackboardArtifact;
@@ -39,11 +39,8 @@
 import org.sleuthkit.datamodel.TskCoreException;
 
 /**
- * Class to help ingest modules create Web Browser artifacts.
- *
- * These include bookmarks, cookies, downloads, history, and web form
- * autofill data.
- *
+ * A class that helps modules to create web browser artifacts: bookmarks,
+ * cookies, downloads, history, and web form address and autofill data.
  */
 public final class WebBrowserArtifactsHelper extends ArtifactHelperBase {
 
@@ -54,17 +51,37 @@ public final class WebBrowserArtifactsHelper extends ArtifactHelperBase {
 	private static final BlackboardArtifact.Type WEB_FORM_ADDRESS_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_WEB_FORM_ADDRESS);
 	private static final BlackboardArtifact.Type WEB_FORM_AUTOFILL_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_WEB_FORM_AUTOFILL);
 	private static final BlackboardArtifact.Type WEB_HISTORY_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_WEB_HISTORY);
-		
+
 	/**
-	 * Creates a WebBrowserArtifactsHelper.
+	 * Constructs an instance of a class that helps modules to create web
+	 * browser artifacts: bookmarks, cookies, downloads, history, and web form
+	 * address and autofill data.
+	 *
+	 * @param caseDb      The case database.
+	 * @param moduleName  The name of the module creating the artifacts.
+	 * @param srcContent  The source/parent content of the artifacts.
+	 * @param ingestJobId The numeric identifier of the ingest job within which
+	 *                    the artifacts are being created, may be null.
+	 */
+	public WebBrowserArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent, Long ingestJobId) {
+		super(caseDb, moduleName, srcContent, ingestJobId);
+	}
+
+	/**
+	 * Constructs an instance of a class that helps modules to create web
+	 * browser artifacts: bookmarks, cookies, downloads, history, and web form
+	 * address and autofill data.
 	 *
-	 * @param caseDb     Sleuthkit case db.
-	 * @param moduleName Name of module using the helper.
-	 * @param srcContent Source content being processed by the module.
+	 * @param caseDb     The case database.
+	 * @param moduleName The name of the module creating the artifacts.
+	 * @param srcContent The source/parent content of the artifacts.
 	 *
+	 * @deprecated Use WebBrowserArtifactsHelper(SleuthkitCase caseDb, String
+	 * moduleName, Content srcContent, Long ingestJobId) instead.
 	 */
+	@Deprecated
 	public WebBrowserArtifactsHelper(SleuthkitCase caseDb, String moduleName, Content srcContent) {
-		super(caseDb, moduleName, srcContent);
+		this(caseDb, moduleName, srcContent, null);
 	}
 
 	/**
@@ -116,12 +133,13 @@ public BlackboardArtifact addWebBookmark(String url, String title, long creation
 
 		// add attributes to artifact
 		attributes.addAll(otherAttributesList);
-		
+
 		Content content = getContent();
 		BlackboardArtifact bookMarkArtifact = content.newDataArtifact(WEB_BOOKMARK_TYPE, attributes);
 
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(bookMarkArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(bookMarkArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return bookMarkArtifact;
@@ -185,12 +203,13 @@ public BlackboardArtifact addWebCookie(String url,
 
 		// add attributes to artifact
 		attributes.addAll(otherAttributesList);
-		
+
 		Content content = getContent();
 		BlackboardArtifact cookieArtifact = content.newDataArtifact(WEB_COOKIE_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(cookieArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(cookieArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return cookieArtifact;
@@ -244,19 +263,20 @@ public BlackboardArtifact addWebDownload(String url, long startTime, String path
 
 		// add attributes to artifact
 		attributes.addAll(otherAttributesList);
-		
+
 		Content content = getContent();
 		BlackboardArtifact webDownloadArtifact = content.newDataArtifact(WEB_DOWNLOAD_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(webDownloadArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(webDownloadArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return webDownloadArtifact;
 	}
 
 	/**
-	 * Adds a TSK_WEB_FORM_AUTOFILL artifact.
+	 * Adds a TSK_WEB_FORM_ADDRESS artifact.
 	 *
 	 * @param personName     Person name, required.
 	 * @param email          Email address, may be empty or null.
@@ -302,25 +322,26 @@ public BlackboardArtifact addWebFormAddress(String personName, String email,
 			Collection<BlackboardAttribute> otherAttributesList) throws TskCoreException, BlackboardException {
 
 		Collection<BlackboardAttribute> attributes = new ArrayList<>();
-		
+
 		CommunicationsManager commManager = this.getSleuthkitCase().getCommunicationsManager();
-		
+
+		Optional<Long> ingestJobId = getIngestJobId();
 		if (StringUtils.isNotEmpty(email)) {
 			try {
-			commManager.createAccountFileInstance(Account.Type.EMAIL, email, this.getModuleName(), this.getContent());
+				commManager.createAccountFileInstance(Account.Type.EMAIL, email, this.getModuleName(), this.getContent(), null, ingestJobId.orElse(null));
 			} catch (InvalidAccountIDException ex) {
 				LOGGER.log(Level.WARNING, String.format("Invalid account identifier %s", email), ex);
 			}
 		}
 
-		if(StringUtils.isNotEmpty(phoneNumber)) {
+		if (StringUtils.isNotEmpty(phoneNumber)) {
 			try {
-			commManager.createAccountFileInstance(Account.Type.PHONE, phoneNumber, this.getModuleName(), this.getContent());
+				commManager.createAccountFileInstance(Account.Type.PHONE, phoneNumber, this.getModuleName(), this.getContent(), null, ingestJobId.orElse(null));
 			} catch (InvalidAccountIDException ex) {
 				LOGGER.log(Level.WARNING, String.format("Invalid account identifier %s", phoneNumber), ex);
 			}
 		}
-		
+
 		// construct attributes 
 		attributes.add(new BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME, getModuleName(), personName));
 
@@ -335,9 +356,9 @@ public BlackboardArtifact addWebFormAddress(String personName, String email,
 		// add artifact
 		Content content = getContent();
 		BlackboardArtifact webFormAddressArtifact = content.newDataArtifact(WEB_FORM_ADDRESS_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(webFormAddressArtifact, getModuleName());
+		getSleuthkitCase().getBlackboard().postArtifact(webFormAddressArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return webFormAddressArtifact;
@@ -383,7 +404,7 @@ public BlackboardArtifact addWebFormAutofill(String name, String value,
 	public BlackboardArtifact addWebFormAutofill(String name, String value,
 			long creationTime, long accessTime, int count,
 			Collection<BlackboardAttribute> otherAttributesList) throws TskCoreException, BlackboardException {
-		
+
 		Collection<BlackboardAttribute> attributes = new ArrayList<>();
 
 		// construct attributes 
@@ -399,9 +420,10 @@ public BlackboardArtifact addWebFormAutofill(String name, String value,
 
 		Content content = getContent();
 		BlackboardArtifact webFormAutofillArtifact = content.newDataArtifact(WEB_FORM_AUTOFILL_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(webFormAutofillArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(webFormAutofillArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return webFormAutofillArtifact;
@@ -410,12 +432,12 @@ public BlackboardArtifact addWebFormAutofill(String name, String value,
 	/**
 	 * Adds a Web History artifact.
 	 *
-	 * @param url          Url visited, required.
-	 * @param accessTime   Last access time, may be 0 if not available.
-	 * @param referrer     Referrer, may be empty or null.
-	 * @param title        Website title, may be empty or null.
-	 * @param programName  Application/program recording the history, may be
-	 *                     empty or null.
+	 * @param url         Url visited, required.
+	 * @param accessTime  Last access time, may be 0 if not available.
+	 * @param referrer    Referrer, may be empty or null.
+	 * @param title       Website title, may be empty or null.
+	 * @param programName Application/program recording the history, may be
+	 *                    empty or null.
 	 *
 	 * @return Web history artifact created.
 	 *
@@ -462,12 +484,13 @@ public BlackboardArtifact addWebHistory(String url, long accessTime,
 
 		// add attributes to artifact
 		attributes.addAll(otherAttributesList);
-		
+
 		Content content = getContent();
 		BlackboardArtifact webHistoryArtifact = content.newDataArtifact(WEB_HISTORY_TYPE, attributes);
-		
+
 		// post artifact 
-		getSleuthkitCase().getBlackboard().postArtifact(webHistoryArtifact, getModuleName());
+		Optional<Long> ingestJobId = getIngestJobId();
+		getSleuthkitCase().getBlackboard().postArtifact(webHistoryArtifact, getModuleName(), ingestJobId.orElse(null));
 
 		// return the artifact
 		return webHistoryArtifact;
diff --git a/bindings/java/src/org/sleuthkit/datamodel/localization/lastupdated.properties b/bindings/java/src/org/sleuthkit/datamodel/localization/lastupdated.properties
index 8d365ea9d44c2e2da3f3bb1969d089e6d5c29bad..697a9a63b3bfb902bc30333961da84c3820a413d 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/localization/lastupdated.properties
+++ b/bindings/java/src/org/sleuthkit/datamodel/localization/lastupdated.properties
@@ -1,2 +1,2 @@
-#Thu Jul 01 12:01:30 UTC 2021
-bundles.ja.lastupdated=8e19cd639b4cbc45f216c427008de0afb2ccbe02
+#Thu Sep 30 10:23:46 UTC 2021
+bundles.ja.lastupdated=751fd41efbf6b9f6f9188615ab78a4cc999c4cbf
diff --git a/bindings/java/test/org/sleuthkit/datamodel/OsAccountTest.java b/bindings/java/test/org/sleuthkit/datamodel/OsAccountTest.java
index 7c14f3e124b799ad10e4d468b58166d0f3021974..b18826c9b745bdaad566dd51e4270b7be1dc0bcb 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/OsAccountTest.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/OsAccountTest.java
@@ -396,6 +396,87 @@ public void mergeRealmsTests() throws TskCoreException, OsAccountManager.NotUser
 		}
 	}
 	
+	@Test 
+	public void updateRealmAndMergeTests() throws TskCoreException, OsAccountManager.NotUserSIDException {
+		
+		/**
+		 * Test the scenario where an update of an account triggers an update of 
+		 * a realm and subsequent merge of realms and accounts.
+		 */
+		
+		Host host = caseDB.getHostManager().newHost("updateRealmAndMergeTestHost");
+		
+		
+		
+			// Step 1: create a local account with SID and user name
+			String ownerUid1 = "S-1-5-21-1182664808-117526782-2525957323-13395";
+			String realmName1 = null;
+			String loginName1 = "sandip";
+			
+			OsAccount osAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(ownerUid1, loginName1, realmName1, host, OsAccountRealm.RealmScope.LOCAL);
+			OsAccountRealm realm1 = caseDB.getOsAccountRealmManager().getRealmByRealmId(osAccount1.getRealmId());
+			
+			assertEquals(realm1.getRealmAddr().isPresent(), true);	// verify the realm has a SID
+			assertEquals(realm1.getRealmNames().isEmpty(), true);	// verify the realm has no name
+			
+			
+			// Step2: create a local account with domain name and username
+			String ownerUid2 = null;
+			String realmName2 = "CORP";
+			String loginName2 = "sandip";
+			
+			Optional<OsAccount> oOsAccount2 = caseDB.getOsAccountManager().getWindowsOsAccount(ownerUid2, loginName2, realmName2, host);
+			
+			// this account should not exist yet
+			assertEquals(oOsAccount2.isPresent(), false);
+			
+			// create a new account -  a new realm as there is nothing to tie it to realm1 
+			OsAccount osAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(ownerUid2, loginName2, realmName2, host, OsAccountRealm.RealmScope.LOCAL);
+			OsAccountRealm realm2 = caseDB.getOsAccountRealmManager().getRealmByRealmId(osAccount2.getRealmId());
+			
+			assertTrue(osAccount1.getId() != osAccount2.getId());
+			assertTrue(realm1.getRealmId() != realm2.getRealmId());
+			
+			
+			
+			// Step 3: now create/update the account with sid/domain/username
+			// this should return the existing account1, which needs to be updated.
+			String ownerUid3 = "S-1-5-21-1182664808-117526782-2525957323-13395";
+			String realmAddr3 = "S-1-5-21-1182664808-117526782-2525957323";
+			String loginName3 = "sandip";
+			String realmName3 = "CORP";
+			
+			Optional<OsAccount> oOsAccount3 = caseDB.getOsAccountManager().getWindowsOsAccount(ownerUid3, loginName3, realmName3, host);
+
+			assertTrue(oOsAccount3.isPresent());
+			
+            
+			// update the account so that its domain gets updated.
+			OsAccountManager.OsAccountUpdateResult updateResult = caseDB.getOsAccountManager().updateCoreWindowsOsAccountAttributes(oOsAccount3.get(), ownerUid3, loginName3, realmName3, host);
+			Optional<OsAccount> updatedAccount3 = updateResult.getUpdatedAccount();
+			assertTrue(updatedAccount3.isPresent());
+
+			// this should cause the realm1 to be updated - and then realm2 to be merged into realm1 
+			OsAccountRealm realm3 = caseDB.getOsAccountRealmManager().getRealmByRealmId(updatedAccount3.get().getRealmId());
+
+			assertTrue(realm3.getRealmId() == realm1.getRealmId());
+
+			assertTrue(realm3.getRealmAddr().isPresent());		// verify the realm gets an addr
+			assertTrue(realm3.getRealmAddr().get().equalsIgnoreCase(realmAddr3));
+			
+			assertTrue(realm3.getRealmNames().get(0).equalsIgnoreCase(realmName3));	// verify realm name.
+
+
+			// And now verify that the realm2 has been merged into realm1. 
+			OsAccountRealm realm22 = caseDB.getOsAccountRealmManager().getRealmByRealmId(osAccount2.getRealmId());
+			assertTrue(realm22.getDbStatus() == OsAccountRealm.RealmDbStatus.MERGED);
+
+			//and account2 has been merged into account1
+			OsAccount osAccount22 = caseDB.getOsAccountManager().getOsAccountByObjectId(osAccount2.getId());
+			assertTrue(osAccount22.getOsAccountDbStatus() == OsAccount.OsAccountDbStatus.MERGED);
+				
+	}
+	
 	@Test 
 	public void hostAddressTests() throws TskCoreException {
 		
@@ -440,10 +521,39 @@ public void hostAddressTests() throws TskCoreException {
 		assertEquals(hostAddr.getAddress().equalsIgnoreCase(hostnameStr), true);
 		assertEquals(HostAddress.HostAddressType.HOSTNAME.equals(hostAddr.getAddressType()), true);
 		
+		// Test some IPV6 addresses with zone/interface specifiers
+		String ipv6WithZoneStr1 = "fe80::1ff:fe23:4567:890a%eth2";
+		String ipv6WithZoneStr2 = "fe80::1ff:fe23:4567:890a%3";
+		String ipv6WithZoneStr3 = "fe80::1ff:fe23:4567:890a%12345";
+		String ipv6WithoutZoneStr = "fe80::1ff:fe23:4567:890a";
+		
+		HostAddress addr3 = caseDB.getHostAddressManager().newHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithZoneStr1);
+		assertEquals(addr3.getAddress().equalsIgnoreCase(ipv6WithoutZoneStr), true);
+		assertEquals(HostAddress.HostAddressType.IPV6.equals(addr3.getAddressType()), true);
+		
+		HostAddress addr4 = caseDB.getHostAddressManager().newHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithZoneStr2);
+		assertEquals(addr4.getAddress().equalsIgnoreCase(ipv6WithoutZoneStr), true);
+		assertEquals(HostAddress.HostAddressType.IPV6.equals(addr4.getAddressType()), true);
+		
+		
 		// Test get
 		Optional<HostAddress> addr4opt = caseDB.getHostAddressManager().getHostAddress(HostAddress.HostAddressType.IPV4, ipv4Str);
 		assertEquals(addr4opt.isPresent(), true);
 		
+		
+		// Test get on IPv6 Address with zone specifiers - they should all resolve to same address  - the one without the zone.
+		addr4opt = caseDB.getHostAddressManager().getHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithZoneStr1);
+		assertEquals(addr4opt.isPresent(), true);
+		
+		addr4opt = caseDB.getHostAddressManager().getHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithZoneStr2);
+		assertEquals(addr4opt.isPresent(), true);
+		
+		addr4opt = caseDB.getHostAddressManager().getHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithZoneStr3);
+		assertEquals(addr4opt.isPresent(), true);
+		
+		addr4opt = caseDB.getHostAddressManager().getHostAddress(HostAddress.HostAddressType.DNS_AUTO, ipv6WithoutZoneStr);
+		assertEquals(addr4opt.isPresent(), true);
+				
 		// Test host map
 		Host host = caseDB.getHostManager().newHost("TestHostAddress");
 		
@@ -584,6 +694,15 @@ public void basicOsAccountTests() throws TskCoreException, OsAccountManager.NotU
 			assertEquals(osAccount11.getLoginName().orElse("").equalsIgnoreCase(loginName1), true);	
 			
 			
+			// try to get the account with the NULL SID (S-1-0-0) & login name.  It should use the login name to find the account
+			Optional<OsAccount> osAccount21 =  caseDB.getOsAccountManager().getWindowsOsAccount("S-1-0-0", loginName1, realmName1, host1);
+			
+			assertTrue(osAccount21.isPresent());
+			assertEquals(osAccount21.get().getAddr().orElse("").equalsIgnoreCase(ownerUid1), true);	
+			assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(osAccount21.get().getRealmId()).getRealmNames().get(0).equalsIgnoreCase(realmName1), true);
+			assertEquals(osAccount21.get().getLoginName().orElse("").equalsIgnoreCase(loginName1), true);	
+			
+			
 			// Let's update osAccount1
 			String fullName1 = "Johnny Depp";
 			Long creationTime1 = 1611858618L;
@@ -657,14 +776,13 @@ public void windowsSpecialAccountTests() throws TskCoreException, OsAccountManag
 
 		try {
 			
-			String SPECIAL_WINDOWS_REALM_ADDR = "SPECIAL_WINDOWS_ACCOUNTS";
-			
-			
 			// TEST create accounts with special SIDs on host2
 			{
 				String hostname2 = "host222";
 				Host host2 = caseDB.getHostManager().newHost(hostname2);
 
+				String ntAuthorityRealmAddr = "S-1-5";
+				
 				String specialSid1 = "S-1-5-18";
 				String specialSid2 = "S-1-5-19";
 				String specialSid3 = "S-1-5-20";
@@ -673,9 +791,9 @@ public void windowsSpecialAccountTests() throws TskCoreException, OsAccountManag
 				OsAccount specialAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid2, null, null, host2, OsAccountRealm.RealmScope.UNKNOWN);
 				OsAccount specialAccount3 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid3, null, null, host2, OsAccountRealm.RealmScope.UNKNOWN);
 
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
 			}
 			
 			
@@ -684,22 +802,130 @@ public void windowsSpecialAccountTests() throws TskCoreException, OsAccountManag
 				String hostname3 = "host333";
 				Host host3 = caseDB.getHostManager().newHost(hostname3);
 
-				String specialSid1 = "S-1-5-18";
-				String specialSid2 = "S-1-5-19";
-				String specialSid3 = "S-1-5-20";
-
-				OsAccount specialAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid1, null, null, host3, OsAccountRealm.RealmScope.UNKNOWN);
-				OsAccount specialAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid2, null, null, host3, OsAccountRealm.RealmScope.UNKNOWN);
-				OsAccount specialAccount3 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid3, null, null, host3, OsAccountRealm.RealmScope.UNKNOWN);
-
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
+				String ntAuthorityRealmName = "NT AUTHORITY";
+				String ntAuthorityRealmAddr = "S-1-5";
+				
+				
+				String specialSid1518 = "S-1-5-18";
+				
+				// Create an account with just the well known SID - it should automatically get a realmname and loginName
+				// these well-known NT AUTHORITY SIDs resolve to a realm whose address is S-1-5
+				OsAccount specialAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid1518, null, null, host3, OsAccountRealm.RealmScope.UNKNOWN);
+				
+				// the realm address for these well-known SIDs is the NT AUTHORITY realm address (S-1-5).
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
 				
 				// verify a new local realm with host3 was created for these account even they've been seen previously on another host
 				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
+				
+				// default realm name for these three special SIDs is 'NT AUTHORITY' 
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				
+					
+				
+				// Get the same account in another language - "NT instans"/SYSTEM - it should resolve to the account created above. 
+				Optional<OsAccount> optSpecialAccount11 = caseDB.getOsAccountManager().getWindowsOsAccount(null, "SYSTEM", "NT instans", host3);
+				assertEquals(optSpecialAccount11.isPresent(), true);
+				
+				OsAccount specialAccount11 = optSpecialAccount11.get();
+				assertEquals(specialAccount11.getId(), specialAccount1.getId());	// should return the same account we created above
+				
+				// should have the same SID
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount11.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				
+				// should have the english realm name
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount11.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+			
+				
+				
+				// Test getting account in yet another language - AUTORITE NT/SYSTÈME
+				Optional<OsAccount> optSpecialAccount12 = caseDB.getOsAccountManager().getWindowsOsAccount(null, "SYSTÈME", "AUTORITE NT", host3);
+				assertEquals(optSpecialAccount12.isPresent(), true);
+				
+				OsAccount specialAccount12 = optSpecialAccount12.get();
+				assertEquals(specialAccount11.getId(), specialAccount1.getId());	// should return the same account we created above
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount12.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount12.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				
+				
+			
+				// create a new account with S-1-5-18/NT instans/SYSTEM - it should be resolved to the existing account S-1-5-18/NT AUTHORITY/SYSTEM
+				OsAccount specialAccount13 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid1518, "SYSTEM", "NT instans", host3, OsAccountRealm.RealmScope.UNKNOWN);
+				
+				// Ensure it's the same realm as specialAccount1
+				assertEquals(specialAccount13.getRealmId(), specialAccount1.getRealmId());
+				
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount13.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				
+				// ensure that the name of the realm stays the well known english name - "NT AUTHORITY"
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount13.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				
+				
+			
+				// Test another well known SID
+				String specialSid1519 = "S-1-5-19";
+				String localServiceLoginName = "LOCAL SERVICE";
+				
+				// create account with SID and non-English names S-1-5-19/SERVIZIO LOCALE/AUTORITE NT - it should be created anew with English names. 
+				OsAccount specialAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid1519, "SERVIZIO LOCALE", "AUTORITE NT", host3, OsAccountRealm.RealmScope.UNKNOWN);
+						
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				assertEquals(specialAccount2.getLoginName().get().equalsIgnoreCase(localServiceLoginName), true);
+				
+				
+				// now get account for NT INSTANS/LOKALER DIENST - it should just get the above account
+				Optional<OsAccount> optSpecialAccount21 = caseDB.getOsAccountManager().getWindowsOsAccount(null, "LOKALER DIENST", "NT INSTANS", host3);	
+				assertEquals(optSpecialAccount21.isPresent(), true);
+				
+				OsAccount specialAccount21 = optSpecialAccount21.get();
+			
+				// should be same account as one created above
+				assertEquals(specialAccount2.getId(), specialAccount21.getId());
+				assertEquals(specialAccount2.getRealmId(), specialAccount21.getRealmId());
+				
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount21.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount21.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				assertEquals(specialAccount21.getLoginName().get().equalsIgnoreCase(localServiceLoginName), true);
+				
+				
+				//---- 
+				String specialSid1520 = "S-1-5-20";
+				String networkServiceLoginName = "NETWORK SERVICE";
+				
+				// Test: first create an account with realm/login name only, then get it by the corresponding SID alone - it should resolve to the same account.
+				OsAccount specialAccount3 = caseDB.getOsAccountManager().newWindowsOsAccount(null, "NETZWERKDIENST", "NT instans", host3, OsAccountRealm.RealmScope.UNKNOWN);
+						
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				assertEquals(specialAccount3.getLoginName().get().equalsIgnoreCase(networkServiceLoginName), true);
+				
+				
+				// Now get the account by the corresponding SID - it should resolve to the account created above. 
+				Optional<OsAccount> optSpecialAccount31 = caseDB.getOsAccountManager().getWindowsOsAccount(specialSid1520, null, null, host3);	
+				assertEquals(optSpecialAccount31.isPresent(), true);
+				
+				OsAccount specialAccount31 = optSpecialAccount31.get();
+				assertEquals(specialAccount3.getId(), specialAccount31.getId());
+				assertEquals(specialAccount3.getRealmId(), specialAccount31.getRealmId());
+				
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount31.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount31.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount31.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				assertEquals(specialAccount31.getLoginName().get().equalsIgnoreCase(networkServiceLoginName), true);
+				
+				// Try getting the account with realm/loginName in another language
+				Optional<OsAccount> optSpecialAccount32 = caseDB.getOsAccountManager().getWindowsOsAccount(null, "SERVICE RÉSEAU", "AUTORITE NT", host3);	
+				assertEquals(optSpecialAccount32.isPresent(), true);
+				
+				OsAccount specialAccount32 = optSpecialAccount32.get();
+				assertEquals(specialAccount3.getId(), specialAccount32.getId());
+				assertEquals(specialAccount3.getRealmId(), specialAccount32.getRealmId());
+				
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount32.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(ntAuthorityRealmAddr), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount32.getRealmId()).getScopeHost().orElse(null).getName().equalsIgnoreCase(hostname3), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount32.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(ntAuthorityRealmName), true);
+				assertEquals(specialAccount32.getLoginName().get().equalsIgnoreCase(networkServiceLoginName), true);
 			}
 
 			
@@ -713,20 +939,52 @@ public void windowsSpecialAccountTests() throws TskCoreException, OsAccountManag
 				String specialSid3 = "S-1-5-90-0-2";
 				String specialSid4 = "S-1-5-96-0-3";
 				
-
+				// expected realm addresses for these special SIDs
+				String specialSidRealmAddr1 = "S-1-5-80";
+				String specialSidRealmAddr2 = "S-1-5-82";
+				String specialSidRealmAddr3 = "S-1-5-90";
+				String specialSidRealmAddr4 = "S-1-5-96";
+				
+				// All accounts in the range S-1-5-80 to S-1-5-111 are well known SIDS
+				String specialSid5 = "S-1-5-99-0-3";
+				String specialSid6 = "S-1-5-100-0-3";
+				String specialSid7 = "S-1-5-111-0-3";
+				String specialSid8 = "S-1-5-112-0-3"; // NOT SPECIAL SID
+				String specialSid9 = "S-1-5-79-0-3"; // NOT SPECIAL SID
+				
+				// expected realm addresses for these special SIDs
+				String specialSidRealmAddr5 = "S-1-5-99";
+				String specialSidRealmAddr6 = "S-1-5-100";
+				String specialSidRealmAddr7 = "S-1-5-111";
+				String specialSidRealmAddr8 = "S-1-5-112-0"; // NOT SPECIAL SID
+				String specialSidRealmAddr9 = "S-1-5-79-0"; // NOT SPECIAL SID
+				
+				
+				
 				OsAccount specialAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid1, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
 				OsAccount specialAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid2, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
 				OsAccount specialAccount3 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid3, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
 				OsAccount specialAccount4 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid4, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
 				
 
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
-				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount4.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(SPECIAL_WINDOWS_REALM_ADDR), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount1.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr1), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount2.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr2), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount3.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr3), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount4.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr4), true);
 				
 				
-			}
+				OsAccount specialAccount5 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid5, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
+				OsAccount specialAccount6 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid6, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
+				OsAccount specialAccount7 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid7, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
+				OsAccount specialAccount8 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid8, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
+				OsAccount specialAccount9 = caseDB.getOsAccountManager().newWindowsOsAccount(specialSid9, null, null, host4, OsAccountRealm.RealmScope.UNKNOWN);
+				
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount5.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr5), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount6.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr6), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount7.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr7), true);
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount8.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr8), true);  // specialSid8 is NOT special.
+				assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(specialAccount9.getRealmId()).getRealmAddr().orElse("").equalsIgnoreCase(specialSidRealmAddr9), true);  // specialSid9 is NOT special.
+		}
 			
 			// TEST: create accounts with a invalid user SIDs - these should generate an exception
 			{
@@ -767,6 +1025,34 @@ public void windowsSpecialAccountTests() throws TskCoreException, OsAccountManag
 					// continue
 				}
 
+				try {
+					// try to create account with NULL SID and null name - should fail. 
+					String sid4 = "S-1-0-0"; //  NULL SID
+					OsAccount osAccount4 = caseDB.getOsAccountManager().newWindowsOsAccount(sid4, null, realmName5, host5, OsAccountRealm.RealmScope.UNKNOWN);
+					
+					// above should raise an exception
+					assertEquals(true, false);
+				}
+				catch (TskCoreException ex) {
+					// continue
+				}
+				
+				try {
+					// try to create an account with the NULL SID and a valid login name. Should discard the NULL SID and create the account with the login name.
+					String sid5 = "S-1-0-0"; //  NULL SID
+					String loginName5 = "login5";
+					OsAccount osAccount5 = caseDB.getOsAccountManager().newWindowsOsAccount(sid5, loginName5, realmName5, host5, OsAccountRealm.RealmScope.UNKNOWN);
+					
+					assertFalse(osAccount5.getAddr().isPresent());	// should NOT have a SID
+					assertEquals(caseDB.getOsAccountRealmManager().getRealmByRealmId(osAccount5.getRealmId()).getRealmNames().get(0).equalsIgnoreCase(realmName5), true);
+					assertEquals(osAccount5.getLoginName().orElse("").equalsIgnoreCase(loginName5), true);	
+			
+				}
+				catch (OsAccountManager.NotUserSIDException ex) {
+					// DO NOT EXPECT this exception to be thrown here. 
+					assertEquals(true, false);
+				}
+				
 			}
 		}
 		
@@ -790,11 +1076,34 @@ public void osAccountInstanceTests() throws TskCoreException, OsAccountManager.N
 		OsAccount osAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(ownerUid1, null, realmName1, host1, OsAccountRealm.RealmScope.LOCAL);
 
 		// Test: add an instance
-		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.LAUNCHED);
-
-		// Test: add an existing instance - should be a no-op.
-		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.LAUNCHED);
+		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.ACCESSED);
+		
+		// Verify 
+		List<OsAccountInstance> account1Instances = caseDB.getOsAccountManager().getOsAccountInstances(osAccount1);
+		assertEquals(account1Instances.size(), 1);
+		assertEquals(account1Instances.get(0).getInstanceType().getId(), OsAccountInstance.OsAccountInstanceType.ACCESSED.getId());
 
+		// Test: add an instance that already exists - with less significant instance type - this should be a no op.
+		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.REFERENCED); // since ACCESSED > REFERENCED - this should do nothing
+		account1Instances = caseDB.getOsAccountManager().getOsAccountInstances(osAccount1);
+		assertEquals(account1Instances.size(), 1);
+		assertEquals(account1Instances.get(0).getInstanceType().getId(), OsAccountInstance.OsAccountInstanceType.ACCESSED.getId());
+		
+		
+		// Test: add an instance that already exists - with a more significant instance type - this should update the existing instance.
+		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.LAUNCHED); // since LAUNCHED > ACCESSED - this should update the existing instance
+		account1Instances = caseDB.getOsAccountManager().getOsAccountInstances(osAccount1);
+		assertEquals(account1Instances.size(), 1);
+		assertEquals(account1Instances.get(0).getInstanceType().getId(), OsAccountInstance.OsAccountInstanceType.LAUNCHED.getId());
+		
+	
+		// Test: add an instance that already exists - with less significant instance type - should do nothing
+		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.REFERENCED); 
+		caseDB.getOsAccountManager().newOsAccountInstance(osAccount1, image, OsAccountInstance.OsAccountInstanceType.ACCESSED); 
+		account1Instances = caseDB.getOsAccountManager().getOsAccountInstances(osAccount1);
+		assertEquals(account1Instances.size(), 1);
+		assertEquals(account1Instances.get(0).getInstanceType().getId(), OsAccountInstance.OsAccountInstanceType.LAUNCHED.getId());
+		
 		// Test: create account instance on a new host
 		String hostname2 = "host2222";
 		Host host2 = caseDB.getHostManager().newHost(hostname2);
@@ -806,11 +1115,11 @@ public void osAccountInstanceTests() throws TskCoreException, OsAccountManager.N
 		
 		// TBD: perhaps add some files to the case and then use one of the files as the source of attributes.
 		
-		OsAccountAttribute attrib1 = osAccount1.new OsAccountAttribute(caseDB.getAttributeType(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME_PASSWORD_RESET.getTypeID()), resetTime1, osAccount1, null, image);
+		OsAccountAttribute attrib1 = osAccount1.new OsAccountAttribute(caseDB.getBlackboard().getAttributeType(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME_PASSWORD_RESET.getTypeID()), resetTime1, osAccount1, null, image);
 		accountAttributes.add(attrib1);
 		
 		String hint = "HINT";
-		OsAccountAttribute attrib2 = osAccount1.new OsAccountAttribute(caseDB.getAttributeType(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PASSWORD_HINT.getTypeID()), hint, osAccount1, host2, image);
+		OsAccountAttribute attrib2 = osAccount1.new OsAccountAttribute(caseDB.getBlackboard().getAttributeType(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PASSWORD_HINT.getTypeID()), hint, osAccount1, host2, image);
 		accountAttributes.add(attrib2);
 		
 		// add attributes to account.
@@ -965,11 +1274,43 @@ public void windowsAccountUpdateTests() throws TskCoreException, OsAccountManage
 		assertTrue(updatedAccount2.getLoginName().orElse("").equalsIgnoreCase(loginname2));
 		assertTrue(updatedAccount2.getSignature().equalsIgnoreCase(ownerUid2));	// account signature should now be addr
 		
-		// RAMAN TODO: CT-4284
-//		OsAccountRealm realm2 = caseDB.getOsAccountRealmManager().getRealmByRealmId(updatedAccount2.getRealmId());
-//		assertTrue(realm2.getRealmAddr().orElse("").equalsIgnoreCase(realmAddr1));
-//		assertTrue(realm2.getSignature().equalsIgnoreCase(realmSignature1));	
+		OsAccountRealm realm2 = caseDB.getOsAccountRealmManager().getRealmByRealmId(updatedAccount2.getRealmId());
+		assertTrue(realm2.getRealmAddr().orElse("").equalsIgnoreCase(realmAddr1));
+		assertTrue(realm2.getSignature().equalsIgnoreCase(realmSignature1));
 	}
 	
+	@Test
+	public void windowsAccountMergeTests() throws TskCoreException, OsAccountManager.NotUserSIDException {
+
+		String hostname1 = "windowsAccountMergeTestHost";
+		Host host1 = caseDB.getHostManager().newHost(hostname1);
 	
+		// 1. Create an account with a SID alone
+		String sid = "S-1-5-21-111111111-222222222-666666666-0001";
+		OsAccount osAccount1 = caseDB.getOsAccountManager().newWindowsOsAccount(sid, null, null, host1, OsAccountRealm.RealmScope.LOCAL);
+		
+		Long realmId = osAccount1.getRealmId();
+		
+		// 2. Create an account with loginName and realmName
+		String loginName = "jdoe";
+		String realmName = "testRealm";
+		OsAccount osAccount2 = caseDB.getOsAccountManager().newWindowsOsAccount(null, loginName, realmName, host1, OsAccountRealm.RealmScope.LOCAL);
+
+		// 3. Lookup account by SID, loginName, and realmName
+		Optional<OsAccount> oOsAccount = caseDB.getOsAccountManager().getWindowsOsAccount(sid, loginName, realmName, host1);
+		assertTrue(oOsAccount.isPresent());
+		
+		// 4. Update this account with all SID, loginName, and realmName
+		caseDB.getOsAccountManager().updateCoreWindowsOsAccountAttributes(oOsAccount.get(), sid, loginName, realmName, host1);
+		
+		// The two accounts should be merged
+		
+		// Test that there is now only one account associated with sid1
+		List<OsAccount> accounts = caseDB.getOsAccountManager().getOsAccounts().stream().filter(a -> a.getAddr().isPresent() && a.getAddr().get().equals(sid)).collect(Collectors.toList());
+		assertEquals(accounts.size() == 1, true);
+		
+		// Test that there is now only one account associated with loginName
+		accounts = caseDB.getOsAccountManager().getOsAccounts().stream().filter(p -> p.getLoginName().isPresent() && p.getLoginName().get().equals(loginName)).collect(Collectors.toList());
+		assertEquals(accounts.size() == 1, true);
+	}
 }
diff --git a/bindings/java/test/org/sleuthkit/datamodel/TimelineEventTypesTest.java b/bindings/java/test/org/sleuthkit/datamodel/TimelineEventTypesTest.java
index 4ae85e6b1d3eed21041682173965b1bfcc29f7a6..de4d2b3e04014c7bbf111fd8b273b6aa26a142c5 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/TimelineEventTypesTest.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/TimelineEventTypesTest.java
@@ -118,7 +118,7 @@ public void testArtifactAttributeEvents() {
 		mapping.put(ARTIFACT_TYPE.TSK_WIFI_NETWORK, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME));
 		mapping.put(ARTIFACT_TYPE.TSK_WEB_FORM_ADDRESS, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_ACCESSED, ATTRIBUTE_TYPE.TSK_DATETIME_MODIFIED));
 		mapping.put(ARTIFACT_TYPE.TSK_METADATA_EXIF, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_CREATED));
-		mapping.put(ARTIFACT_TYPE.TSK_WEB_COOKIE, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_ACCESSED, ATTRIBUTE_TYPE.TSK_DATETIME_CREATED, ATTRIBUTE_TYPE.TSK_DATETIME_END));
+		mapping.put(ARTIFACT_TYPE.TSK_WEB_COOKIE, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_ACCESSED, ATTRIBUTE_TYPE.TSK_DATETIME_CREATED));
 		mapping.put(ARTIFACT_TYPE.TSK_WEB_DOWNLOAD, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_ACCESSED));
 		mapping.put(ARTIFACT_TYPE.TSK_TL_EVENT, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME));
 		mapping.put(ARTIFACT_TYPE.TSK_METADATA, EnumSet.of(ATTRIBUTE_TYPE.TSK_DATETIME_CREATED, ATTRIBUTE_TYPE.TSK_DATETIME_MODIFIED, ATTRIBUTE_TYPE.TSK_LAST_PRINTED_DATETIME));
diff --git a/case-uco/java/build.xml b/case-uco/java/build.xml
index 67eecd7d0ae56443246cbde9d9e60ec04172042e..d02f84bb5219e4d75e666b0448819a89ea25b0f5 100755
--- a/case-uco/java/build.xml
+++ b/case-uco/java/build.xml
@@ -1,32 +1,40 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns:ivy="antlib:org.apache.ivy.ant" name="SleuthkitCaseUco" default="default" basedir="." >
+<project xmlns:ivy="antlib:org.apache.ivy.ant" name="SleuthkitCaseUco" default="default" basedir=".">
     <description>Builds, tests, and runs the project SleuthkitCaseUco.</description>
     <import file="nbproject/build-impl.xml"/>
     <include file="../../bindings/java/build.xml" as="datamodel"/>
-    
+
     <!-- Hook into the compilation phase of the build process to ensure compile 
          time dependencies are present -->
-    <target name="-pre-compile" depends="get-ivy-dependencies, copy-sleuthkit-java-bindings-jar"
-            description="Resolves ivy dependencies before compilation">
-    </target>    
-    
+    <target name="-pre-compile" depends="get-ivy-dependencies, copy-sleuthkit-java-bindings-jar" description="Resolves ivy dependencies before compilation">
+    </target>
+
     <!-- Hook into the clean phase of the build process to ensure the lib
          folder is being cleared. -->
     <target name="-post-clean">
         <delete dir="lib" />
     </target>
-      
+
     <!-- Copy the sleuthkit jar into lib -->
     <target name="copy-sleuthkit-java-bindings-jar">
         <!-- VERSION here is being sourced from the build.xml in DataModel, which
              is actively maintained with the current TSK version -->
-        <copy file="../../bindings/java/dist/sleuthkit-${VERSION}.jar" 
-              tofile="lib\sleuthkit-${VERSION}.jar"/>
+        <copy file="../../bindings/java/dist/sleuthkit-${VERSION}.jar" tofile="lib\sleuthkit-${VERSION}.jar"/>
     </target>
-    
+
     <target name="get-ivy-dependencies" description="retrieve jar dependencies using ivy" depends="datamodel.init-ivy" unless="offline">
         <ivy:settings file="ivysettings.xml"/>
         <ivy:resolve/>
         <ivy:retrieve sync="true" pattern="lib/[artifact]-[revision](-[classifier]).[ext]"/>
     </target>
+
+    <target name="test-report" description="Runs the regression tests.">
+        <junit fork="on" haltonfailure="yes" dir=".">
+            <classpath>
+                <path path="${run.test.classpath}"/>
+            </classpath>
+            <formatter type="plain" usefile="false" />
+            <test name="org.sleuthkit.caseuco.TestSuite" />
+        </junit>
+    </target>
 </project>
diff --git a/case-uco/java/ivy.xml b/case-uco/java/ivy.xml
index 965825449307ec99ab8f2e7ba717e707b861fae5..9ad58745d3e89da7ae33575f4cf04b6e9e8555e3 100755
--- a/case-uco/java/ivy.xml
+++ b/case-uco/java/ivy.xml
@@ -2,6 +2,7 @@
     <info organisation="org.sleuthkit" module="SleuthkitCaseUco"/>
     <dependencies>		
         <dependency org="com.google.code.gson" name="gson" rev="2.8.5"/>
+        <dependency org="junit" name="junit" rev="4.12"/>
     </dependencies>
 </ivy-module>
 
diff --git a/case-uco/java/nbproject/project.properties b/case-uco/java/nbproject/project.properties
index 54f8132cb17d77668360f088672d856ae3a00ef9..cf6949fec42ecff63c68feffec45a069ee0ab6b7 100644
--- a/case-uco/java/nbproject/project.properties
+++ b/case-uco/java/nbproject/project.properties
@@ -35,14 +35,16 @@ dist.javadoc.dir=${dist.dir}/javadoc
 endorsed.classpath=
 excludes=
 file.reference.gson-2.8.5.jar=lib/gson-2.8.5.jar
-file.reference.sleuthkit-4.11.0.jar=lib/sleuthkit-4.11.0.jar
+file.reference.sleuthkit-4.12.1.jar=lib/sleuthkit-4.12.1.jar
+file.reference.junit-4.12.jar=lib/junit-4.12.jar
+file.reference.hamcrest-core-1.3.jar=lib/hamcrest-core-1.3.jar
 includes=**
 jar.archive.disabled=${jnlp.enabled}
 jar.compress=false
 jar.index=${jnlp.enabled}
 javac.classpath=\
     ${file.reference.gson-2.8.5.jar}:\
-${file.reference.sleuthkit-4.11.0.jar}
+${file.reference.sleuthkit-4.12.1.jar}
 # Space-separated list of extra javac options
 javac.compilerargs=-Xlint
 javac.deprecation=false
@@ -55,7 +57,9 @@ javac.source=1.8
 javac.target=1.8
 javac.test.classpath=\
     ${javac.classpath}:\
-    ${build.classes.dir}
+    ${build.classes.dir}:\
+    ${file.reference.junit-4.12.jar}:\
+    ${file.reference.hamcrest-core-1.3.jar}
 javac.test.modulepath=\
     ${javac.modulepath}
 javac.test.processorpath=\
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Account.java b/case-uco/java/src/org/sleuthkit/caseuco/Account.java
index 974ba5d2ae3ead1123e5f80bc02953d746fe0d14..defc5924e5603c5470254a28611abf84c8c238c7 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Account.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Account.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,4 +48,16 @@ Account setOwner(Identity owner) {
         this.owner = owner.getId();
         return this;
     }
+
+    String getAccountType() {
+        return accountType;
+    }
+
+    String getAccountIdentifier() {
+        return accountIdentifier;
+    }
+
+    String getOwner() {
+        return owner;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/AccountAuthentication.java b/case-uco/java/src/org/sleuthkit/caseuco/AccountAuthentication.java
index fd140ccae9d613a103d260093cc9c93edf708d4a..ba8e1450ed9cc6c8f76f99459c505e8c7c7ed308 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/AccountAuthentication.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/AccountAuthentication.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,10 @@ AccountAuthentication setPassword(String password) {
         this.password = password;
         return this;
     }
+
+    String getPassword() {
+        return password;
+    }
+    
+    
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Action.java b/case-uco/java/src/org/sleuthkit/caseuco/Action.java
index 17ae1382fa43ab1fd4040b4911c3183d3e9a7025..4aa8db3e24c2fe0472bb8474b1aaf53a49448e71 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Action.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Action.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -40,4 +40,8 @@ Action setStartTime(Long startTime) {
 
         return this;
     }
+
+    String getStartTime() {
+        return startTime;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/ActionArgument.java b/case-uco/java/src/org/sleuthkit/caseuco/ActionArgument.java
index 6778375646387597870c91c43ff242cd4ef0646c..54bd565231b069287ec15c8f9e9f7a634732cb96 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/ActionArgument.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/ActionArgument.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ ActionArgument setArgumentName(String argumentName) {
         this.argumentName = argumentName;
         return this;
     }
+
+    String getArgumentName() {
+        return argumentName;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Annotation.java b/case-uco/java/src/org/sleuthkit/caseuco/Annotation.java
index ef5e3bbfaa422901205b4ab8f56d8de746d97cb3..f5033209ffb3f74aeae33659df99b4311c63be91 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Annotation.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Annotation.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,4 +48,12 @@ Annotation addObject(String object) {
         this.object.add(object);
         return this;
     }
+
+    List<String> getTags() {
+        return tags;
+    }
+
+    List<String> getObject() {
+        return object;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Application.java b/case-uco/java/src/org/sleuthkit/caseuco/Application.java
index 1443ff9e09d0dd635a649fb6d305d54b6c70332d..5f26c00da139f62a3a442725a583c2ff3d180aaf 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Application.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Application.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,4 +55,20 @@ Application setVersion(String version) {
         this.version = version;
         return this;
     }
+
+    String getApplicationIdentifier() {
+        return applicationIdentifier;
+    }
+
+    String getOperatingSystem() {
+        return operatingSystem;
+    }
+
+    Integer getNumberOfLaunches() {
+        return numberOfLaunches;
+    }
+
+    String getVersion() {
+        return version;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/ApplicationAccount.java b/case-uco/java/src/org/sleuthkit/caseuco/ApplicationAccount.java
index 1d63641b4c3ecc82be0bb30973d5dc8f938a11e1..279771e777a1a6e5b6ccf21f7c7363c38c62631d 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/ApplicationAccount.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/ApplicationAccount.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ ApplicationAccount setApplication(CyberItem application) {
         this.application = application.getId();
         return this;
     }
+
+    String getApplication() {
+        return application;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Assertion.java b/case-uco/java/src/org/sleuthkit/caseuco/Assertion.java
index d214f76c8eb9954e90aee5950b6ed26a4138a313..ef8d20c379ca71adb9b32940702d6914f67c2918 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Assertion.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Assertion.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ Assertion setStatement(String statement) {
         this.statement = statement;
         return this;
     }
+
+    String getStatement() {
+        return statement;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Attachment.java b/case-uco/java/src/org/sleuthkit/caseuco/Attachment.java
index e6edd3c6d849686aecbd8520216e12f59cb9e253..6b6ec4d8f3d7db1b680af193d02c8c2aa96aba57 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Attachment.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Attachment.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ Attachment setUrl(String url) {
         this.url = url;
         return this;
     }
+
+    String getUrl() {
+        return url;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/BrowserBookmark.java b/case-uco/java/src/org/sleuthkit/caseuco/BrowserBookmark.java
index 8e72ff5c27a7b791048507b1784ce5b6c1548598..7c524404a6b7c32b34b947306d3f243b0dc9cd75 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/BrowserBookmark.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/BrowserBookmark.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,4 +41,12 @@ BrowserBookmark setApplication(CyberItem application) {
         this.application = application.getId();
         return this;
     }
+
+    String getUrlTargeted() {
+        return urlTargeted;
+    }
+
+    String getApplication() {
+        return application;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/BrowserCookie.java b/case-uco/java/src/org/sleuthkit/caseuco/BrowserCookie.java
index 6fe32dc384de446506bac94f59d26e9e099b3fb8..4724609a33062e479bbac2aa6ea962f9ff4c2830 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/BrowserCookie.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/BrowserCookie.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -76,4 +76,28 @@ BrowserCookie setCookiePath(String cookiePath) {
         this.cookiePath = cookiePath;
         return this;
     }
+
+    String getCookieName() {
+        return cookieName;
+    }
+
+    String getAccessedTime() {
+        return accessedTime;
+    }
+
+    String getExpirationTime() {
+        return expirationTime;
+    }
+
+    String getCookieDomain() {
+        return cookieDomain;
+    }
+
+    String getApplication() {
+        return application;
+    }
+
+    String getCookiePath() {
+        return cookiePath;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/CalendarEntry.java b/case-uco/java/src/org/sleuthkit/caseuco/CalendarEntry.java
index 62561dfd6ec98b990cb17281c0a9d0b8a7c93041..8a776b9787e959ff505a6ee80a24d57ab494f260 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/CalendarEntry.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/CalendarEntry.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -62,4 +62,20 @@ CalendarEntry setStartTime(Long startTime) {
         }
         return this;
     }
+
+    String getEventType() {
+        return eventType;
+    }
+
+    String getStartTime() {
+        return startTime;
+    }
+
+    String getEndTime() {
+        return endTime;
+    }
+
+    String getLocation() {
+        return location;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/CaseUcoExporter.java b/case-uco/java/src/org/sleuthkit/caseuco/CaseUcoExporter.java
index dd21f2d494b887853d45cd6c0800bb4e3e0b70fb..2386ac04655126e05c6c47e96b76ea2003f4964c 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/CaseUcoExporter.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/CaseUcoExporter.java
@@ -85,6 +85,7 @@
 import org.sleuthkit.datamodel.VolumeSystem;
 import org.sleuthkit.datamodel.AbstractFile;
 import org.sleuthkit.datamodel.BlackboardArtifact;
+import static org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_ITEM;
 import org.sleuthkit.datamodel.BlackboardAttribute;
 import org.sleuthkit.datamodel.TimelineEventType;
 import org.sleuthkit.datamodel.TskCoreException;
@@ -121,7 +122,7 @@ public class CaseUcoExporter {
      * Creates a default CaseUcoExporter.
      *
      * @param sleuthkitCase The sleuthkit case instance containing the data to
-     * be exported.
+     *                      be exported.
      */
     public CaseUcoExporter(SleuthkitCase sleuthkitCase) {
         this(sleuthkitCase, new Properties());
@@ -135,9 +136,9 @@ public CaseUcoExporter(SleuthkitCase sleuthkitCase) {
      * README.md file.
      *
      * @param sleuthkitCase The sleuthkit case instance containing the data to
-     * be exported.
-     * @param props Properties instance containing supported configuration
-     * parameters.
+     *                      be exported.
+     * @param props         Properties instance containing supported
+     *                      configuration parameters.
      */
     public CaseUcoExporter(SleuthkitCase sleuthkitCase, Properties props) {
         this.sleuthkitCase = sleuthkitCase;
@@ -153,7 +154,7 @@ public CaseUcoExporter(SleuthkitCase sleuthkitCase, Properties props) {
      * contain a URL).
      *
      * @param uuidService A custom UUID implementation, which will be used to
-     * generate @id values in all export methods.
+     *                    generate @id values in all export methods.
      *
      * @return reference to this, for chaining configuration method calls.
      */
@@ -196,6 +197,7 @@ public List<JsonElement> exportSleuthkitCase() throws TskCoreException {
      * Exports an AbstractFile instance to CASE.
      *
      * @param file AbstractFile instance to export
+     *
      * @return A collection of CASE JSON elements
      *
      * @throws TskCoreException If an error occurred during database access.
@@ -207,10 +209,11 @@ public List<JsonElement> exportAbstractFile(AbstractFile file) throws TskCoreExc
     /**
      * Exports an AbstractFile instance to CASE.
      *
-     * @param file AbstractFile instance to export
+     * @param file      AbstractFile instance to export
      * @param localPath The location of the file on secondary storage, somewhere
-     * other than the case. Example: local disk. This value will be ignored if
-     * null
+     *                  other than the case. Example: local disk. This value
+     *                  will be ignored if null
+     *
      * @return A collection of CASE JSON elements
      *
      * @throws TskCoreException If an error occurred during database access.
@@ -257,7 +260,9 @@ public List<JsonElement> exportAbstractFile(AbstractFile file, String localPath)
      * Exports a ContentTag instance to CASE.
      *
      * @param contentTag ContentTag instance to export
+     *
      * @return A collection of CASE JSON elements
+     *
      * @throws TskCoreException If an error occurred during database access.
      */
     public List<JsonElement> exportContentTag(ContentTag contentTag) throws TskCoreException {
@@ -276,7 +281,9 @@ public List<JsonElement> exportContentTag(ContentTag contentTag) throws TskCoreE
      * Exports a DataSource instance to CASE.
      *
      * @param dataSource DataSource instance to export
+     *
      * @return A collection of CASE JSON elements
+     *
      * @throws TskCoreException If an error occurred during database access.
      */
     public List<JsonElement> exportDataSource(DataSource dataSource) throws TskCoreException {
@@ -313,7 +320,9 @@ String getDataSourcePath(DataSource dataSource) {
      * Exports a FileSystem instance to CASE.
      *
      * @param fileSystem FileSystem instance to export
+     *
      * @return A collection of CASE JSON elements
+     *
      * @throws TskCoreException If an error occurred during database access.
      */
     public List<JsonElement> exportFileSystem(FileSystem fileSystem) throws TskCoreException {
@@ -335,6 +344,7 @@ public List<JsonElement> exportFileSystem(FileSystem fileSystem) throws TskCoreE
      * Exports a Pool instance to CASE.
      *
      * @param pool Pool instance to export
+     *
      * @return A collection of CASE JSON elements
      *
      * @throws TskCoreException If an error occurred during database access.
@@ -357,7 +367,9 @@ public List<JsonElement> exportPool(Pool pool) throws TskCoreException {
      * Exports a Volume instance to CASE.
      *
      * @param volume Volume instance to export
+     *
      * @return A collection of CASE JSON elements
+     *
      * @throws TskCoreException If an error occurred during database access.
      */
     public List<JsonElement> exportVolume(Volume volume) throws TskCoreException {
@@ -384,6 +396,7 @@ public List<JsonElement> exportVolume(Volume volume) throws TskCoreException {
      * Exports a VolumeSystem instance to CASE.
      *
      * @param volumeSystem VolumeSystem instance to export
+     *
      * @return A collection of CASE JSON elements
      *
      * @throws TskCoreException If an error occurred during database access.
@@ -406,15 +419,21 @@ public List<JsonElement> exportVolumeSystem(VolumeSystem volumeSystem) throws Ts
      * Exports a BlackboardArtifact instance to CASE.
      *
      * @param artifact BlackboardArtifact instance to export
+     *
      * @return A collection of CASE JSON elements
      *
-     * @throws TskCoreException If an error occurred during database access.
-     * @throws ContentNotExportableException if the content could not be
-     * exported, even in part, to CASE.
+     * @throws TskCoreException                            If an error occurred
+     *                                                     during database
+     *                                                     access.
+     * @throws ContentNotExportableException               if the content could
+     *                                                     not be exported, even
+     *                                                     in part, to CASE.
      * @throws BlackboardJsonAttrUtil.InvalidJsonException If a JSON valued
-     * attribute could not be correctly deserialized.
+     *                                                     attribute could not
+     *                                                     be correctly
+     *                                                     deserialized.
      */
-    @SuppressWarnings( "deprecation" )
+    @SuppressWarnings("deprecation")
     public List<JsonElement> exportBlackboardArtifact(BlackboardArtifact artifact) throws TskCoreException,
             ContentNotExportableException, BlackboardJsonAttrUtil.InvalidJsonException {
         List<JsonElement> output = new ArrayList<>();
@@ -440,8 +459,6 @@ public List<JsonElement> exportBlackboardArtifact(BlackboardArtifact artifact) t
             assembleHashsetHit(uuid, artifact, output);
         } else if (TSK_DEVICE_ATTACHED.getTypeID() == artifactTypeId) {
             assembleDeviceAttached(uuid, artifact, output);
-        } else if (TSK_INTERESTING_FILE_HIT.getTypeID() == artifactTypeId) {
-            assembleInterestingFileHit(uuid, artifact, output);
         } else if (TSK_EMAIL_MSG.getTypeID() == artifactTypeId) {
             assembleEmailMessage(uuid, artifact, output);
         } else if (TSK_EXTRACTED_TEXT.getTypeID() == artifactTypeId) {
@@ -478,8 +495,8 @@ public List<JsonElement> exportBlackboardArtifact(BlackboardArtifact artifact) t
             assembleProgRun(uuid, artifact, output);
         } else if (TSK_ENCRYPTION_DETECTED.getTypeID() == artifactTypeId) {
             assembleEncryptionDetected(uuid, artifact, output);
-        } else if (TSK_INTERESTING_ARTIFACT_HIT.getTypeID() == artifactTypeId) {
-            assembleInterestingArtifact(uuid, artifact, output);
+        } else if (TSK_INTERESTING_FILE_HIT.getTypeID() == artifactTypeId || TSK_INTERESTING_ARTIFACT_HIT.getTypeID() == artifactTypeId || TSK_INTERESTING_ITEM.getTypeID() == artifactTypeId) {
+            assembleInterestingItem(uuid, artifact, output);
         } else if (TSK_GPS_ROUTE.getTypeID() == artifactTypeId) {
             assembleGPSRoute(uuid, artifact, output);
         } else if (TSK_REMOTE_DRIVE.getTypeID() == artifactTypeId) {
@@ -688,13 +705,6 @@ private void assembleRecentObject(String uuid, BlackboardArtifact artifact, List
                 .setTarget(uuid), output);
     }
 
-    private void assembleInterestingFileHit(String uuid, BlackboardArtifact artifact, List<JsonElement> output) throws TskCoreException {
-        Assertion export = new Assertion(uuid);
-        export.setName(getValueIfPresent(artifact, StandardAttributeTypes.TSK_SET_NAME));
-        export.setStatement(getValueIfPresent(artifact, StandardAttributeTypes.TSK_COMMENT));
-        addToOutput(export, output);
-    }
-
     private void assembleExtractedText(String uuid, BlackboardArtifact artifact, List<JsonElement> output) throws TskCoreException {
         Trace export = new Trace(uuid)
                 .addBundle(new ExtractedString()
@@ -1133,11 +1143,10 @@ private void assembleEncryptionDetected(String uuid, BlackboardArtifact artifact
         addToOutput(export, output);
     }
 
-    private void assembleInterestingArtifact(String uuid, BlackboardArtifact artifact, List<JsonElement> output) throws TskCoreException {
+    private void assembleInterestingItem(String uuid, BlackboardArtifact artifact, List<JsonElement> output) throws TskCoreException {
         Assertion export = new Assertion(uuid);
         export.setName(getValueIfPresent(artifact, StandardAttributeTypes.TSK_SET_NAME));
         export.setStatement(getValueIfPresent(artifact, StandardAttributeTypes.TSK_COMMENT));
-
         Long associatedArtifactId = getLongIfPresent(artifact, StandardAttributeTypes.TSK_ASSOCIATED_ARTIFACT);
         if (associatedArtifactId != null) {
             BlackboardArtifact associatedArtifact = artifact.getSleuthkitCase().getBlackboardArtifact(associatedArtifactId);
@@ -1355,10 +1364,9 @@ private void assembleAssociatedObject(String uuid, BlackboardArtifact artifact,
         Trace export = new Trace(uuid);
         addToOutput(export, output);
 
-        BlackboardAttribute associatedArtifactID = artifact.getAttribute(StandardAttributeTypes.TSK_ASSOCIATED_ARTIFACT);
-        if (associatedArtifactID != null) {
-            long artifactID = associatedArtifactID.getValueLong();
-            BlackboardArtifact associatedArtifact = artifact.getSleuthkitCase().getArtifactByArtifactId(artifactID);
+        Long associatedArtifactId = getLongIfPresent(artifact, StandardAttributeTypes.TSK_ASSOCIATED_ARTIFACT);
+        if (associatedArtifactId != null) {
+            BlackboardArtifact associatedArtifact = artifact.getSleuthkitCase().getBlackboardArtifact(associatedArtifactId);
             if (associatedArtifact != null) {
                 addToOutput(new BlankRelationshipNode()
                         .setSource(uuid)
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/ComputerSpecification.java b/case-uco/java/src/org/sleuthkit/caseuco/ComputerSpecification.java
index 06e257896ee4c4e916db7b78650025522b0c8aa6..d6ba6bc3dcddbc3594f5fc3c07f54991e2600952 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/ComputerSpecification.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/ComputerSpecification.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -44,4 +44,12 @@ ComputerSpecification setProcessorArchitecture(String processorArchitecture) {
         this.processorArchitecture = processorArchitecture;
         return this;
     }
+
+    String getHostName() {
+        return hostName;
+    }
+
+    String getProcessorArchitecture() {
+        return processorArchitecture;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Contact.java b/case-uco/java/src/org/sleuthkit/caseuco/Contact.java
index f1b180b8fb059283a85c8d920902ed594d0164ca..b52880f3eca9e23bbd78424d7043b7a2d4efb210 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Contact.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Contact.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ Contact setContactName(String contactName) {
         this.contactName = contactName;
         return this;
     }
+
+    String getContactName() {
+        return contactName;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/ContentData.java b/case-uco/java/src/org/sleuthkit/caseuco/ContentData.java
index cf317eed75955477af7e57d0cca4ea2ee0a33169..d2d1adb456640dc93844cf1245315be6e1173e80 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/ContentData.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/ContentData.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -79,4 +79,28 @@ ContentData setDataPayloadReferenceUrl(UcoObject url) {
         this.dataPayloadReferenceUrl = url.getId();
         return this;
     }
+
+    Long getSizeInBytes() {
+        return sizeInBytes;
+    }
+
+    String getMimeType() {
+        return mimeType;
+    }
+
+    List<Hash> getHashes() {
+        return hashes;
+    }
+
+    String getDataPayload() {
+        return dataPayload;
+    }
+
+    String getOwner() {
+        return owner;
+    }
+
+    String getDataPayloadReferenceUrl() {
+        return dataPayloadReferenceUrl;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Device.java b/case-uco/java/src/org/sleuthkit/caseuco/Device.java
index 74c0ff84de07fa9f51d30ceb17660d2e0441aafb..7b297fb2543ad2505d1331ee98c8e90eadd2c93a 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Device.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Device.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -54,4 +54,16 @@ Device setSerialNumber(String serialNumber) {
         this.serialNumber = serialNumber;
         return this;
     }
+
+    String getManufacturer() {
+        return manufacturer;
+    }
+
+    String getModel() {
+        return model;
+    }
+
+    String getSerialNumber() {
+        return serialNumber;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/DigitalAccount.java b/case-uco/java/src/org/sleuthkit/caseuco/DigitalAccount.java
index b277b7b7c2aa7e8f861712b61e27dd9c3ce30d78..d7e5ffb510f68d3485293b4835dcee92d9316aea 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/DigitalAccount.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/DigitalAccount.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -46,4 +46,12 @@ DigitalAccount setLastLoginTime(Long time) {
         }
         return this;
     }
+
+    String getDisplayName() {
+        return displayName;
+    }
+
+    String getLastLoginTime() {
+        return lastLoginTime;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Domain.java b/case-uco/java/src/org/sleuthkit/caseuco/Domain.java
index bae38071a89b0d9c75f72b978a75912a2d4a6cd8..6431a5f24f842adb0b9e266e264d0d14505db858 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Domain.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Domain.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ Domain setValue(String value) {
         this.value = value;
         return this;
     }
+
+    String getValue() {
+        return value;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/DomainName.java b/case-uco/java/src/org/sleuthkit/caseuco/DomainName.java
index 938a07913a399a1f8479474a12ad57c201ed2c6a..cdaf731920c28c73274b6699fccfbebb79bfb3e3 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/DomainName.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/DomainName.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ DomainName setValue(String value) {
         this.value = value;
         return this;
     }
+
+    String getValue() {
+        return value;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/EmailAddress.java b/case-uco/java/src/org/sleuthkit/caseuco/EmailAddress.java
index efaa4fa5f26b0d4540bfc66d5b5aa7f96910772a..5e81f949b0868a8da5b79be8f5c8019f682586f4 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/EmailAddress.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/EmailAddress.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ EmailAddress setValue(String value) {
         this.value = value;
         return this;
     }
+
+    String getValue() {
+        return value;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/EmailMessage.java b/case-uco/java/src/org/sleuthkit/caseuco/EmailMessage.java
index 3f6e3ecabe4c7f433568507cd1a83461b1183e44..5ace197c40e8ffd5182c6df471b384e5327d8e38 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/EmailMessage.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/EmailMessage.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -118,4 +118,52 @@ EmailMessage setInReplyTo(CyberItem replyTo) {
         this.inReplyTo = replyTo.getId();
         return this;
     }
+
+    String getReceivedTime() {
+        return receivedTime;
+    }
+
+    String getSentTime() {
+        return sentTime;
+    }
+
+    String getBcc() {
+        return bcc;
+    }
+
+    String getCc() {
+        return cc;
+    }
+
+    String getFrom() {
+        return from;
+    }
+
+    String getHeaderRaw() {
+        return headerRaw;
+    }
+
+    String getMessageID() {
+        return messageID;
+    }
+
+    String getSubject() {
+        return subject;
+    }
+
+    String getSender() {
+        return sender;
+    }
+
+    String getInReplyTo() {
+        return inReplyTo;
+    }
+
+    String getBody() {
+        return body;
+    }
+
+    String getContentType() {
+        return contentType;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/EnvironmentVariable.java b/case-uco/java/src/org/sleuthkit/caseuco/EnvironmentVariable.java
index 833bdf731370ff4e0c90c057cf65c4f7bce815c8..1d84d414b47c408ca981fb6853dddb9a3e61c44f 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/EnvironmentVariable.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/EnvironmentVariable.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ EnvironmentVariable setValue(String value) {
         this.value = value;
         return this;
     }
+
+    String getValue() {
+        return value;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/ExtractedString.java b/case-uco/java/src/org/sleuthkit/caseuco/ExtractedString.java
index 1c099cfce1770e0056e3e666ed2a2da48a0ddfdc..25c51c974a7c1d02eb27b0b2fec8a1dc60bb930d 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/ExtractedString.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/ExtractedString.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ ExtractedString setStringValue(String stringValue) {
         this.stringValue = stringValue;
         return this;
     }
+
+    String getStringValue() {
+        return stringValue;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/FacetDeserializer.java b/case-uco/java/src/org/sleuthkit/caseuco/FacetDeserializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..1078f1ec0af803e1e3e0e3d707b887297f33c9f6
--- /dev/null
+++ b/case-uco/java/src/org/sleuthkit/caseuco/FacetDeserializer.java
@@ -0,0 +1,58 @@
+/*
+ * Sleuth Kit CASE JSON LD Support
+ *
+ * Copyright 2020-2021 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *	 http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.caseuco;
+
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import java.lang.reflect.Type;
+
+/**
+ * A Gson deserializer for facets that dynamically converts to POJO based on @type.
+ * The @type name must exactly match the name of the POJO.
+ */
+class FacetDeserializer implements JsonDeserializer<Facet> {
+    private static final String BASE_PACKAGE = "org.sleuthkit.caseuco";
+    
+    @Override
+    public Facet deserialize(JsonElement je, Type type, JsonDeserializationContext jdc) throws JsonParseException {
+        if (!(je instanceof JsonObject)) {
+            throw new JsonParseException("Expected a json object for " + je);
+        }
+        
+        JsonObject jObj = (JsonObject) je;
+        JsonElement jsonId = jObj.get("@type");
+        if (jsonId == null) {
+            throw new JsonParseException("Expected non-null @type value");
+        }
+        
+        String id = jsonId.getAsString();
+        String className = BASE_PACKAGE + "." + id;
+        Class<?> deserializationClass;
+        try {
+            deserializationClass = Class.forName(className);
+        } catch (ClassNotFoundException ex) {
+            throw new JsonParseException("Expected class to exist: " + className, ex);
+        }
+        
+        return jdc.deserialize(jObj, deserializationClass);
+    }
+}
\ No newline at end of file
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/File.java b/case-uco/java/src/org/sleuthkit/caseuco/File.java
index e692fd8b7c4e4f32c6835224c51abe074d35205d..369d057ddcc54ad7d0e8f11ec3539d333c740249 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/File.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/File.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,4 +74,28 @@ File setSizeInBytes(long sizeInBytes) {
         this.sizeInBytes = sizeInBytes;
         return this;
     }
+
+    String getAccessedTime() {
+        return accessedTime;
+    }
+
+    String getExtension() {
+        return extension;
+    }
+
+    String getFileName() {
+        return fileName;
+    }
+
+    String getFilePath() {
+        return filePath;
+    }
+
+    Boolean getIsDirectory() {
+        return isDirectory;
+    }
+
+    Long getSizeInBytes() {
+        return sizeInBytes;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/FileSystem.java b/case-uco/java/src/org/sleuthkit/caseuco/FileSystem.java
index 72112fac7a721915527802d246f4a59bc55d3f44..33974036b48d2808270db79b756a064ef11538fd 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/FileSystem.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/FileSystem.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -50,7 +50,7 @@ FileSystem setCluserSize(long cluserSize) {
     }
 
     //Adapter for TSK_FS_TYPE enum
-    private enum FileSystemType {
+    static enum FileSystemType {
         BDE(null),
         CPIO(null),
         EXT4(TSK_FS_TYPE_EXT4),
@@ -69,7 +69,7 @@ private FileSystemType(TSK_FS_TYPE_ENUM tskType) {
             this.tskType = tskType;
         }
 
-        private static FileSystemType from(TSK_FS_TYPE_ENUM typeToConvert) {
+        static FileSystemType from(TSK_FS_TYPE_ENUM typeToConvert) {
             for (FileSystemType type : FileSystemType.values()) {
                 if (type.tskType == typeToConvert) {
                     return type;
@@ -78,5 +78,17 @@ private static FileSystemType from(TSK_FS_TYPE_ENUM typeToConvert) {
 
             return null;
         }
+        
+        TskData.TSK_FS_TYPE_ENUM getTskType() {
+            return tskType;
+        }
+    }
+
+    FileSystemType getFileSystemType() {
+        return fileSystemType;
+    }
+
+    Long getCluserSize() {
+        return cluserSize;
     }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/HTTPConnection.java b/case-uco/java/src/org/sleuthkit/caseuco/HTTPConnection.java
index 9a90aaa4a78f1e331d09bfefe29cb79ca06f02ff..463b3a83fad462360618b3c384b2c33bf1f715df 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/HTTPConnection.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/HTTPConnection.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ HTTPConnection setHttpRequestHeader(String httpRequestHeader) {
         this.httpRequestHeader = httpRequestHeader;
         return this;
     }
+
+    String getHttpRequestHeader() {
+        return httpRequestHeader;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Hash.java b/case-uco/java/src/org/sleuthkit/caseuco/Hash.java
index 2aa257d15949b07a884f2d9545399e24c705e0c3..cf8be151e9c2d8f6cb680741b4c88df618d3d457 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Hash.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Hash.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,4 +48,12 @@ Hash setHashMethod(HashMethod method) {
     enum HashMethod {
         MD5;
     }
+
+    HashMethod getHashMethod() {
+        return hashMethod;
+    }
+
+    String getHashValue() {
+        return hashValue;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/LatLongCoordinates.java b/case-uco/java/src/org/sleuthkit/caseuco/LatLongCoordinates.java
index f040c34ff4f415ddec8b795f863966cfa93be28b..ece1517266263206b5b7438a17911a43dc55bc71 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/LatLongCoordinates.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/LatLongCoordinates.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,4 +48,16 @@ LatLongCoordinates setLongitude(Double longitude) {
         this.longitude = longitude;
         return this;
     }
+
+    Double getAltitude() {
+        return altitude;
+    }
+
+    Double getLatitude() {
+        return latitude;
+    }
+
+    Double getLongitude() {
+        return longitude;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/MACAddress.java b/case-uco/java/src/org/sleuthkit/caseuco/MACAddress.java
index c003d5a4b73b668b6afce4a5b4523553a004d31a..761db5cdbf6902aba6fc84decd842cd8b93c506b 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/MACAddress.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/MACAddress.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ MACAddress setValue(String value) {
         this.value = value;
         return this;
     }
+
+    String getValue() {
+        return value;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Message.java b/case-uco/java/src/org/sleuthkit/caseuco/Message.java
index fb61b9bea59901b4c8e2f758134fa5617f4fde0f..fffd31bd4841f98a56886ba8cba799f1a8d3b537 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Message.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Message.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -66,4 +66,20 @@ Message setId(String id) {
         super.setId("_:" + id);
         return this;
     }
+
+    String getMessageText() {
+        return messageText;
+    }
+
+    String getApplication() {
+        return application;
+    }
+
+    String getSentTime() {
+        return sentTime;
+    }
+
+    String getMessageType() {
+        return messageType;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/MobileDevice.java b/case-uco/java/src/org/sleuthkit/caseuco/MobileDevice.java
index c7126499c7e65483ead10fde68f026d741ed6069..8760aed4c958363d33b9499db5b0d23ef37496ff 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/MobileDevice.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/MobileDevice.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,4 +41,12 @@ MobileDevice setIMEI(String IMEI) {
         this.IMEI = IMEI;
         return this;
     }
+
+    String getBluetoothDeviceName() {
+        return bluetoothDeviceName;
+    }
+
+    String getIMEI() {
+        return IMEI;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Note.java b/case-uco/java/src/org/sleuthkit/caseuco/Note.java
index e71b73950312f07df5d59cef495320e80fdbe494..019b3fade16e2d6c897ad628021f49b4c6b60f41 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Note.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Note.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -35,4 +35,7 @@ Note setText(String text) {
         return this;
     }
 
+    String getText() {
+        return text;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/OperatingSystem.java b/case-uco/java/src/org/sleuthkit/caseuco/OperatingSystem.java
index 1492938d880888a5f0e5b08e2d50730f2e186843..781d4395a3f177084c9b1b24008ec6a15b012b0a 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/OperatingSystem.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/OperatingSystem.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -46,4 +46,12 @@ OperatingSystem setVersion(String version) {
         this.version = version;
         return this;
     }
+
+    String getInstallDate() {
+        return installDate;
+    }
+
+    String getVersion() {
+        return version;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/PathRelation.java b/case-uco/java/src/org/sleuthkit/caseuco/PathRelation.java
index af9aeeba8c5851a5fba3d1a70831c62448b0ab25..631176bf86f5adc7cc13a9fa89363a765685235f 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/PathRelation.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/PathRelation.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ PathRelation setPath(String path) {
         this.path = path;
         return this;
     }
+
+    String getPath() {
+        return path;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/PhoneAccount.java b/case-uco/java/src/org/sleuthkit/caseuco/PhoneAccount.java
index 04b722b2421727b96beed607b72cc23f97540670..b55b544410a87730db8371d50280b8dc0bf7ddd9 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/PhoneAccount.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/PhoneAccount.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ PhoneAccount setPhoneNumber(String phoneNumber) {
         this.phoneNumber = phoneNumber;
         return this;
     }
+
+    String getPhoneNumber() {
+        return phoneNumber;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/PhoneCall.java b/case-uco/java/src/org/sleuthkit/caseuco/PhoneCall.java
index 7eb6b9756c54e10c67ba5466a1a7329729c0402c..fdb271fef780dadddc42bd21ee414ea3d2c3f91c 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/PhoneCall.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/PhoneCall.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -69,4 +69,24 @@ PhoneCall setCallType(String callType) {
         this.callType = callType;
         return this;
     }
+
+    String getTo() {
+        return to;
+    }
+
+    String getFrom() {
+        return from;
+    }
+
+    String getStartTime() {
+        return startTime;
+    }
+
+    String getEndTime() {
+        return endTime;
+    }
+
+    String getCallType() {
+        return callType;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Relationship.java b/case-uco/java/src/org/sleuthkit/caseuco/Relationship.java
index 27d4563fbb8c4a28a132aec826c90d3b0d8af631..275e76039d6d1ed25ff54465e071950e0635bff4 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Relationship.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Relationship.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,4 +55,20 @@ Relationship isDirectional(boolean isDirectional) {
         this.isDirectional = isDirectional;
         return this;
     }
+
+    String getSource() {
+        return source;
+    }
+
+    String getTarget() {
+        return target;
+    }
+
+    String getKindOfRelationship() {
+        return kindOfRelationship;
+    }
+
+    Boolean getIsDirectional() {
+        return isDirectional;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/SIMCard.java b/case-uco/java/src/org/sleuthkit/caseuco/SIMCard.java
index e1dad6c46920cdf0ee6e0d33953e8bd125fae4c7..0020d65544f0055e11f5eb40a9adfc9276bbb59c 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/SIMCard.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/SIMCard.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,4 +41,12 @@ SIMCard setICCID(String ICCID) {
         this.ICCID = ICCID;
         return this;
     }
+
+    String getIMSI() {
+        return IMSI;
+    }
+
+    String getICCID() {
+        return ICCID;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/SMSMessage.java b/case-uco/java/src/org/sleuthkit/caseuco/SMSMessage.java
index c2012bd8c6b485d4be83e59da8bb3309252a16a2..7a842b8e7f1d0f154b5f9f0fa41ff9a40c897b02 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/SMSMessage.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/SMSMessage.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,4 +36,8 @@ SMSMessage setIsRead(Integer status) {
         }
         return this;
     }
+
+    Boolean getIsRead() {
+        return isRead;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Trace.java b/case-uco/java/src/org/sleuthkit/caseuco/Trace.java
index 5393ec3663ec30a0a5104e8126008771dc9f7519..a400a4fb2916826856b647fea5d76fa4069ae371 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Trace.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Trace.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -37,4 +37,8 @@ final Trace addBundle(Facet bundle) {
         hasPropertyBundle.add(bundle);
         return this;
     }
+
+    List<Facet> getHasPropertyBundle() {
+        return hasPropertyBundle;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/URL.java b/case-uco/java/src/org/sleuthkit/caseuco/URL.java
index b0f6213e952549cc001f1367f5d1258a347f1678..43ac205408bd9017b08688cb648ed793d3114bdd 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/URL.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/URL.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,4 +41,12 @@ URL setUserName(CyberItem userName) {
         this.userName = userName.getId();
         return this;
     }
+
+    String getFullValue() {
+        return fullValue;
+    }
+
+    String getUserName() {
+        return userName;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/UcoObject.java b/case-uco/java/src/org/sleuthkit/caseuco/UcoObject.java
index 14c49bee68092898c292d5a6479278fdda9ea6a6..ad218ba7ce98b4443ced4279b9ad61df327f42f2 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/UcoObject.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/UcoObject.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -85,4 +85,28 @@ UcoObject setTag(String tag) {
         this.tag = tag;
         return this;
     }
+
+    String getType() {
+        return type;
+    }
+
+    String getCreatedTime() {
+        return createdTime;
+    }
+
+    String getModifiedTime() {
+        return modifiedTime;
+    }
+
+    String getDescription() {
+        return description;
+    }
+
+    String getName() {
+        return name;
+    }
+
+    String getTag() {
+        return tag;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/Volume.java b/case-uco/java/src/org/sleuthkit/caseuco/Volume.java
index 7f6bc4850f80de42653e629dac4faf8e50414f7f..1073d4abbd8677477e937848fc17ac1e8114c6c6 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/Volume.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/Volume.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,4 +36,12 @@ Volume setSectorSize(long sectorSize) {
         this.sectorSize = sectorSize;
         return this;
     }
+
+    String getVolumeType() {
+        return volumeType;
+    }
+
+    Long getSectorSize() {
+        return sectorSize;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/WindowsAccount.java b/case-uco/java/src/org/sleuthkit/caseuco/WindowsAccount.java
index f51d26a1d64323397f67e8e6013e52587884abae..fb3a2ba95371bfc3102076b6d32ec825b0ba383b 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/WindowsAccount.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/WindowsAccount.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ WindowsAccount setGroups(String groups) {
         this.groups = groups;
         return this;
     }
+
+    String getGroups() {
+        return groups;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/WindowsComputerSpecification.java b/case-uco/java/src/org/sleuthkit/caseuco/WindowsComputerSpecification.java
index db6365622745b89a86e7eab006e29e9e389ef644..49a544715a9797361911ffaa0fa39ed17cce91ef 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/WindowsComputerSpecification.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/WindowsComputerSpecification.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,4 +48,16 @@ WindowsComputerSpecification setWindowsTempDirectory(CyberItem windowsTempDirect
         this.windowsTempDirectory = windowsTempDirectory.getId();
         return this;
     }
+
+    String getRegisteredOrganization() {
+        return registeredOrganization;
+    }
+
+    String getRegisteredOwner() {
+        return registeredOwner;
+    }
+
+    String getWindowsTempDirectory() {
+        return windowsTempDirectory;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/WindowsRegistryValue.java b/case-uco/java/src/org/sleuthkit/caseuco/WindowsRegistryValue.java
index 76b3c31f4b065fc2a6977f593c448f25caeb3cf9..7dd93ff4ed714aa371ae1de1e061071a4ab867ff 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/WindowsRegistryValue.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/WindowsRegistryValue.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ WindowsRegistryValue setData(String data) {
         this.data = data;
         return this;
     }
+
+    String getData() {
+        return data;
+    }
 }
diff --git a/case-uco/java/src/org/sleuthkit/caseuco/WirelessNetworkConnection.java b/case-uco/java/src/org/sleuthkit/caseuco/WirelessNetworkConnection.java
index 0d7f51867313f335bfede01d624fcb1d9f093303..d085dd01a8f0468374d9a6765b9500f44ba212d7 100755
--- a/case-uco/java/src/org/sleuthkit/caseuco/WirelessNetworkConnection.java
+++ b/case-uco/java/src/org/sleuthkit/caseuco/WirelessNetworkConnection.java
@@ -1,7 +1,7 @@
 /*
  * Sleuth Kit CASE JSON LD Support
  *
- * Copyright 2020 Basis Technology Corp.
+ * Copyright 2020-2021 Basis Technology Corp.
  * Contact: carrier <at> sleuthkit <dot> org
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,4 +34,8 @@ WirelessNetworkConnection setSSID(String ssid) {
         this.ssid = ssid;
         return this;
     }
+
+    String getSsid() {
+        return ssid;
+    }
 }
diff --git a/case-uco/java/test/org/sleuthkit/caseuco/FacetDeserializerTests.java b/case-uco/java/test/org/sleuthkit/caseuco/FacetDeserializerTests.java
new file mode 100644
index 0000000000000000000000000000000000000000..91eb8dd55c356613cd6817d23164e281b4942709
--- /dev/null
+++ b/case-uco/java/test/org/sleuthkit/caseuco/FacetDeserializerTests.java
@@ -0,0 +1,192 @@
+/*
+ * Sleuth Kit CASE JSON LD Support
+ *
+ * Copyright 2021 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *	 http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.caseuco;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonParseException;
+import com.google.gson.reflect.TypeToken;
+import java.lang.reflect.Type;
+import java.time.OffsetDateTime;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.junit.Assert;
+
+import org.junit.Test;
+import org.sleuthkit.datamodel.TskData;
+
+/**
+ * Tests for deserializing facets.
+ */
+public class FacetDeserializerTests {
+
+    private static final Logger logger = Logger.getLogger(FacetDeserializerTests.class.getName());
+
+    /**
+     * Parses facets json string into a list of facets.
+     * @param facetsListJson The json string.
+     * @return The list of facets.
+     * @throws JsonParseException 
+     */
+    private static List<Facet> parseFacets(String facetsListJson) throws JsonParseException {
+        GsonBuilder gb = new GsonBuilder();
+        gb.registerTypeAdapter(Facet.class, new FacetDeserializer());
+        Gson gson = gb.create();
+        Type traceList = new TypeToken<ArrayList<Facet>>() {
+        }.getType();
+        return gson.fromJson(facetsListJson, traceList);
+    }
+
+    @Test
+    public void testExpectFacetJsonObject() {
+        try {
+            parseFacets("[\"test1\", 1, 2]");
+            Assert.fail("Expected exception when parsing facets that are not objects");
+        } catch (JsonParseException ex) {
+            Assert.assertNotNull(ex.getMessage());
+            logger.log(Level.INFO, "Received exception of: " + ex.getMessage());
+        }
+    }
+
+    @Test
+    public void testExpectFacetType() {
+        try {
+            parseFacets("[{\"@type\": \"NonsenseType\", \"@id\": \"ItemId\" }]");
+            Assert.fail("Expected exception when parsing a facet with an unrecognized @type");
+        } catch (JsonParseException ex) {
+            Assert.assertNotNull(ex.getMessage());
+            logger.log(Level.INFO, "Received exception of: " + ex.getMessage());
+        }
+    }
+
+    @Test
+    public void testFacetDeserialization() throws JsonParseException {
+        long clusterSize = 512;
+        long createdTime = 946684800;
+        long modifiedTime = 946684801;
+        String description = "A file system";
+        String id = "The id";
+        String name = "The name";
+        String tag = "The tag";
+        TskData.TSK_FS_TYPE_ENUM fsType = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_EXT4;
+
+        UcoObject fileSystem = new FileSystem()
+                .setCluserSize(clusterSize)
+                .setFileSystemType(fsType)
+                .setCreatedTime(createdTime)
+                .setDescription(description)
+                .setId(id)
+                .setModifiedTime(modifiedTime)
+                .setName(name)
+                .setTag(tag);
+
+        String gsonStr = new Gson().toJson(Arrays.asList(fileSystem));
+        logger.log(Level.INFO, "Json string of: " + gsonStr);
+
+        List<Facet> facets = parseFacets(gsonStr);
+        Assert.assertEquals(1, facets.size());
+        Assert.assertTrue(facets.get(0) instanceof FileSystem);
+
+        FileSystem deserialized = (FileSystem) facets.get(0);
+        Assert.assertEquals((Long) clusterSize, deserialized.getCluserSize());
+        Assert.assertEquals(createdTime, OffsetDateTime.parse(deserialized.getCreatedTime()).toEpochSecond());
+        Assert.assertEquals(modifiedTime, OffsetDateTime.parse(deserialized.getModifiedTime()).toEpochSecond());
+
+        Assert.assertEquals(description, deserialized.getDescription());
+        Assert.assertEquals(id, deserialized.getId());
+        Assert.assertEquals(name, deserialized.getName());
+        Assert.assertEquals(tag, deserialized.getTag());
+
+        Assert.assertEquals(deserialized.getFileSystemType().getTskType(), fsType);
+    }
+    
+    @Test
+    public void testTraceDeserialization() throws JsonParseException {
+        long clusterSize = 4096;
+        long createdTime = 946684802;
+        long modifiedTime = 946684803;
+        String description = "A file system 2";
+        String id = "The id 2";
+        String name = "The name 2";
+        String tag = "The tag 2";
+        TskData.TSK_FS_TYPE_ENUM fsType = TskData.TSK_FS_TYPE_ENUM.TSK_FS_TYPE_EXT4;
+
+        FileSystem fileSystem = (FileSystem) new FileSystem()
+                .setCluserSize(clusterSize)
+                .setFileSystemType(fsType)
+                .setCreatedTime(createdTime)
+                .setDescription(description)
+                .setId(id)
+                .setModifiedTime(modifiedTime)
+                .setName(name)
+                .setTag(tag);
+
+        String traceUuid = "uuid";
+        long traceCreateTime = 946684802;
+        long traceModifiedTime = 946684803;
+        String traceDescription = "A file system 2";
+        String traceId = "The id 2";
+        String traceName = "The name 2";
+        String traceTag = "The tag 2";
+        UcoObject trace = new Trace(traceUuid)
+                .addBundle(fileSystem)
+                .setCreatedTime(traceCreateTime)
+                .setDescription(traceDescription)
+                .setId(traceId)
+                .setModifiedTime(traceModifiedTime)
+                .setName(traceName)
+                .setTag(traceTag);
+        
+        String gsonStr = new Gson().toJson(trace);
+        logger.log(Level.INFO, "Json string of: " + gsonStr);
+
+        Trace deserializedTrace = new GsonBuilder()
+                .registerTypeAdapter(Facet.class, new FacetDeserializer())
+                .create()
+                .fromJson(gsonStr, Trace.class);
+        
+        Assert.assertEquals(traceCreateTime, OffsetDateTime.parse(deserializedTrace.getCreatedTime()).toEpochSecond());
+        Assert.assertEquals(traceModifiedTime, OffsetDateTime.parse(deserializedTrace.getModifiedTime()).toEpochSecond());
+
+        Assert.assertEquals(traceDescription, deserializedTrace.getDescription());
+        Assert.assertEquals(traceId, deserializedTrace.getId());
+        Assert.assertEquals(traceName, deserializedTrace.getName());
+        Assert.assertEquals(traceTag, deserializedTrace.getTag());
+
+        List<Facet> facets = deserializedTrace.getHasPropertyBundle();
+        
+        Assert.assertEquals(1, facets.size());
+        Assert.assertTrue(facets.get(0) instanceof FileSystem);
+
+        FileSystem deserialized = (FileSystem) facets.get(0);
+        Assert.assertEquals((Long) clusterSize, deserialized.getCluserSize());
+        Assert.assertEquals(createdTime, OffsetDateTime.parse(deserialized.getCreatedTime()).toEpochSecond());
+        Assert.assertEquals(modifiedTime, OffsetDateTime.parse(deserialized.getModifiedTime()).toEpochSecond());
+
+        Assert.assertEquals(description, deserialized.getDescription());
+        Assert.assertEquals(id, deserialized.getId());
+        Assert.assertEquals(name, deserialized.getName());
+        Assert.assertEquals(tag, deserialized.getTag());
+
+        Assert.assertEquals(deserialized.getFileSystemType().getTskType(), fsType);
+    }
+}
diff --git a/case-uco/java/test/org/sleuthkit/caseuco/TestSuite.java b/case-uco/java/test/org/sleuthkit/caseuco/TestSuite.java
new file mode 100644
index 0000000000000000000000000000000000000000..1f18b6d184e6e11dfda1912e1b3081d105dbea15
--- /dev/null
+++ b/case-uco/java/test/org/sleuthkit/caseuco/TestSuite.java
@@ -0,0 +1,33 @@
+/*
+ * Sleuth Kit CASE JSON LD Support
+ *
+ * Copyright 2021 Basis Technology Corp.
+ * Contact: carrier <at> sleuthkit <dot> org
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *	 http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.sleuthkit.caseuco;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Runs all case uco unit tests.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    FacetDeserializerTests.class
+})
+public class TestSuite {
+
+}
diff --git a/configure.ac b/configure.ac
index 093a9a26fce98e54947f02826e58efdee36c7bc4..7d72d4642d981da7d969f868a5926eb2f6c809c3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@ dnl Process this file with autoconf to produce a configure script.
 
 AC_PREREQ(2.59)
 
-AC_INIT(sleuthkit, 4.11.0)
+AC_INIT(sleuthkit, 4.12.1)
 m4_include([m4/ax_pthread.m4])
 dnl include the version from 1.12.1. This will work for
 m4_include([m4/cppunit.m4])
@@ -35,6 +35,8 @@ AC_PROG_LN_S
 AC_PROG_MAKE_SET
 AC_PATH_PROG(PERL, perl)
 
+TSK_CHECK_PROG_PKGCONFIG
+
 dnl Checks for header files.
 AC_HEADER_STDC
 dnl AC_HEADER_MAJOR
@@ -124,92 +126,94 @@ AC_CHECK_HEADERS(string, , , AC_MSG_ERROR([missing STL string class header]))
 AC_CHECK_HEADERS(vector, , , AC_MSG_ERROR([missing STL vector class header]))
 
 dnl Check for sqlite and its dependencies
-AC_CHECK_HEADERS([sqlite3.h],
-                 [AC_CHECK_LIB(dl, dlopen)
-                  AC_CHECK_LIB(sqlite3, sqlite3_open)])
-dnl Compile the bundled sqlite if there is no system one installed
-AC_MSG_CHECKING(which sqlite3 to use)
-AS_IF([test "x$ac_cv_lib_sqlite3_sqlite3_open" = "xyes"],
-      [AC_MSG_RESULT([system])],
-      [AC_MSG_RESULT([bundled])])
-AM_CONDITIONAL([HAVE_LIBSQLITE3],
-               [test "x$ac_cv_lib_sqlite3_sqlite3_open" = "xyes"])
-
-# Check if we should link afflib.
-AC_ARG_WITH([afflib],
-    [AS_HELP_STRING([--without-afflib],[Do not use AFFLIB even if it is installed])]
-    [AS_HELP_STRING([--with-afflib=dir],[Specify that AFFLIB is installed in directory 'dir'])],
-    dnl If --with-afflib or --without-afflib is given
-    [],
-    dnl If --with-afflib or --without-afflib is given
-    [with_afflib=yes])
-
-dnl check for the lib if they did not specify no
-AS_IF([test "x$with_afflib" != "xno"],
-    dnl Test the dir if they specified something beyond yes/no
-    [AS_IF([test "x$with_afflib" != "xyes"],
-        [AS_IF([test -d ${with_afflib}/include],
-            [CPPFLAGS="$CPPFLAGS -I${with_afflib}/include"
-                LDFLAGS="$LDFLAGS -L${with_afflib}/lib"],
-            dnl Dir given was not correct
-            [AC_MSG_FAILURE([AFFLIB directory not found at ${with_afflib}])])
-        ]
-    )]
-    dnl Check for the header file first to make sure they have the dev install
-    [AC_CHECK_HEADERS([afflib/afflib.h],
-      [AC_CHECK_LIB([afflib], [af_open])]
-    )]
-)
-AS_IF([test "x$ac_cv_lib_afflib_af_open" = "xyes"], [ax_afflib=yes], [ax_afflib=no])
-
-
-dnl Check if we should link zlib
-AC_ARG_WITH([zlib],
-    [AS_HELP_STRING([--without-zlib],[Do not use ZLIB even if it is installed])]
-    [AS_HELP_STRING([--with-zlib=dir],[Specify that ZLIB is installed in directory 'dir'])],
-    dnl If --with-zlib or --without-zlib is given
-    [],
-    dnl if nothing was specified, default to a test
-    [with_zlib=yes])
-
-dnl check for the lib if they did not specify no
-AS_IF(
-   [test "x$with_zlib" != "xno"],
-   [AC_MSG_NOTICE([checking for zlib])]
-   dnl Test the dir if they specified something beyond yes/no
-   [AS_IF([test "x$with_zlib" != "xyes"],
-       [AC_MSG_NOTICE([LOOKING for zlib in ${with_zlib}])]
-       [AS_IF([test -d ${with_zlib}],
-           [CPPFLAGS="$CPPFLAGS -I${with_zlib}/include"
-                LDFLAGS="$LDFLAGS -L${with_zlib}/lib"],
-           dnl Dir given was not correct
-           [AC_MSG_FAILURE([ZLIB directory not found at ${with_zlib}])]
-       )]
-    )]
-    dnl Check for the header file first to make sure they have the dev install
-    [AC_CHECK_HEADERS([zlib.h],
-      [AC_CHECK_LIB([z], [inflate],
-         [],
-         [AC_MSG_WARN([Found zlib headers, but could not link to zlib library.  Will build without zlib.])]
-         [with_zlib=no]
-      )],
-      [AC_MSG_WARN([Could not find usable zlib headers.  Will build without zlib.])]
-      [with_zlib=no]
-    )],
-    [AC_MSG_NOTICE([NOT checking for zlib because with_zlib is no])]
+AS_IF([test "x$ac_cv_prog_PKGCONFIG" = "xyes"],
+  [
+    SAVED_AX_PACKAGE_REQUIRES_PRIVATE="$AX_PACKAGE_REQUIRES_PRIVATE"
+    TSK_PKG_CHECK_MODULES([SQLITE3], [], [sqlite3],
+    [
+      CFLAGS="$CFLAGS $SQLITE3_CFLAGS"
+      CXXFLAGS="$CXXFLAGS $SQLITE3_CFLAGS"
+      LIBS="$LIBS $SQLITE3_LIBS"
+    ],
+    [
+      AX_PACKAGE_REQUIRES_PRIVATE="$SAVED_AX_PACKAGE_REQUIRES_PRIVATE"
+      ax_sqlite3=no
+    ]
+  )]
 )
-AS_IF([test "x$ac_cv_lib_z_inflate" = "xyes"], [ax_zlib=yes], [ax_zlib=no])
-
-AM_CONDITIONAL([X_ZLIB],[test "x$with_zlib" != "xno" && test "x$with_zlib" != "xyes"])
-AS_IF([test "x$with_zlib" != "xno"],
-   [Z_PATH="${with_zlib}/lib"],
-   [AC_MSG_NOTICE([failed to make Z_PATH])]
-)
-AC_SUBST(Z_PATH, $Z_PATH)
 
 dnl needed for sqllite
 AC_CHECK_LIB(dl, dlopen)
 
+AC_CHECK_HEADERS([sqlite3.h], [AC_CHECK_LIB([sqlite3], [sqlite3_open])])
+AS_IF([test "x$ac_cv_lib_sqlite3_sqlite3_open" = "xyes"], [ax_sqlite3=yes])
+
+dnl Compile the bundled sqlite if there is no system one installed
+AC_MSG_CHECKING(which sqlite3 to use)
+AS_IF([test "x$ax_sqlite3" = "xyes"],
+      [AC_MSG_RESULT([system])
+       PACKAGE_LIBS_PRIVATE="$PACKAGE_LIBS_PRIVATE -lsqlite3"],
+      [AC_MSG_RESULT([bundled])])
+AM_CONDITIONAL([HAVE_LIBSQLITE3], [test "x$ax_sqlite3" = "xyes"])
+
+dnl Check if we should link with afflib
+TSK_OPT_DEP_CHECK([afflib], [], [], [afflib/afflib.h], [afflib], [af_open])
+dnl Check if we should link with zlib
+TSK_OPT_DEP_CHECK([zlib], [ZLIB], [zlib], [zlib.h], [z], [inflate])
+dnl Check if we should link with libbfio
+TSK_OPT_DEP_CHECK([libbfio], [], [libbfio], [libbfio.h], [bfio], [libbfio_get_version])
+dnl Check if we should link with libewf
+TSK_OPT_DEP_CHECK([libewf], [EWF], [libewf], [libewf.h], [ewf], [libewf_get_version])
+dnl Check if we should link with libvhdi
+TSK_OPT_DEP_CHECK([libvhdi], [VHDI], [libvhdi], [libvhdi.h], [vhdi], [libvhdi_get_version])
+dnl Check if we should link with libvmdk
+TSK_OPT_DEP_CHECK([libvmdk], [VMDK], [libvmdk], [libvmdk.h], [vmdk], [libvmdk_get_version])
+dnl Check if we should link with libvslvm
+AS_IF([test "x$ax_libbfio" = "xyes"],
+      [TSK_OPT_DEP_CHECK([libvslvm], [LVM], [libvslvm], [libvslvm.h], [vslvm], [libvslvm_get_version])],
+      [ax_libvslvm=no])
+
+dnl check for cppunit
+AC_ARG_ENABLE([cppunit],
+    [AS_HELP_STRING([--disable-cppunit], [Build without cppunit tests])])
+
+ac_cv_cppunit=no
+AS_IF([test "x$enable_cppunit" != "xno"], [
+  AS_IF([test "x$ac_cv_prog_PKGCONFIG" = "xyes"],
+    [
+      dnl IGNOREs keep cppunit out of .pc file, as it's for testing only
+      TSK_PKG_CHECK_MODULES([CPPUNIT], [], [cppunit >= 1.12.1], [ac_cv_cppunit=yes], [ac_cv_cppunit=no], [IGNORE], [IGNORE])
+    ]
+  )
+
+  AS_IF([test "x$ac_cv_cppunit" != "xyes"],
+    [AM_PATH_CPPUNIT(1.12.1)
+     AS_IF([test "x$no_cppunit" = x], [ac_cv_cppunit=yes])]
+  )
+
+  AC_MSG_CHECKING([for TestRunner in -lcppunit])
+
+  SAVED_CFLAGS="$CFLAGS"
+  SAVED_LDFLAGS="$LDFLAGS"
+  CFLAGS="$CPPUNIT_CFLAGS"
+  LDFLAGS="$CPPUNIT_LIBS"
+
+  AC_LANG_PUSH([C++])
+  AC_LINK_IFELSE([AC_LANG_PROGRAM(
+    [[#include <cppunit/ui/text/TestRunner.h>]],
+    [[CppUnit::TextUi::TestRunner();]])],
+    [ax_cv_cppunit=yes],
+    [ax_cv_cppunit=no])
+  AC_LANG_POP([C++])
+
+  CFLAGS="$SAVED_CFLAGS"
+  LDFLAGS="$SAVED_LDFLAGS"
+
+  AC_MSG_RESULT([$ax_cv_cppunit])
+])
+
+AM_CONDITIONAL([HAVE_CPPUNIT],[test "x$ac_cv_cppunit" = xyes])
+
 dnl check for user online input
 
 AC_ARG_ENABLE([offline],
@@ -222,88 +226,6 @@ AC_ARG_ENABLE([offline],
 
 AM_CONDITIONAL([OFFLINE], [test "x$offline" = xtrue])
 
-
-dnl Check if we should link libewf.
-AC_ARG_WITH([libewf],
-    [AS_HELP_STRING([--without-libewf],[Do not use libewf even if it is installed])]
-    [AS_HELP_STRING([--with-libewf=dir],[Specify that libewf is installed in directory 'dir'])],
-    dnl If --with-libewf or --without-libewf is given
-    [],
-    dnl if nothing was specified, default to a test
-    [with_libewf=yes])
-
-dnl check for the lib if they did not specify no
-AS_IF([test "x$with_libewf" != "xno"],
-    dnl Test the dir if they specified something beyond yes/no
-    [AS_IF([test "x$with_libewf" != "xyes"],
-        [AS_IF([test -d ${with_libewf}/include],
-            [CPPFLAGS="$CPPFLAGS -I${with_libewf}/include"
-                LDFLAGS="$LDFLAGS -L${with_libewf}/lib"],
-            dnl Dir given was not correct
-            [AC_MSG_FAILURE([libewf directory not found at ${with_libewf}])])
-        ]
-    )]
-    dnl Check for the header file first to make sure they have the dev install
-    [AC_CHECK_HEADERS([libewf.h],
-      [AC_CHECK_LIB([ewf], [libewf_get_version], [], [NO_LIBEWF=true])]
-    )]
-)
-AS_IF([test "x$ac_cv_lib_ewf_libewf_get_version" = "xyes"], [ax_libewf=yes], [ax_libewf=no])
-
-dnl Check if we should link libvhdi.
-AC_ARG_WITH([libvhdi],
-    [AS_HELP_STRING([--without-libvhdi],[Do not use libvhdi even if it is installed])]
-    [AS_HELP_STRING([--with-libvhdi=dir],[Specify that libvhdi is installed in directory 'dir'])],
-    dnl If --with-libvhdi or --without-libvhdi is given
-    [],
-    dnl if nothing was specified, default to a test
-    [with_libvhdi=yes])
-
-dnl check for the lib if they did not specify no
-AS_IF([test "x$with_libvhdi" != "xno"],
-    dnl Test the dir if they specified something beyond yes/no
-    [AS_IF([test "x$with_libvhdi" != "xyes"],
-        [AS_IF([test -d ${with_libvhdi}/include],
-            [CPPFLAGS="$CPPFLAGS -I${with_libvhdi}/include"
-                LDFLAGS="$LDFLAGS -L${with_libvhdi}/lib"],
-            dnl Dir given was not correct
-            [AC_MSG_FAILURE([libvhdi directory not found at ${with_libvhdi}])])
-        ]
-    )]
-    dnl Check for the header file first to make sure they have the dev install
-    [AC_CHECK_HEADERS([libvhdi.h],
-      [AC_CHECK_LIB([vhdi], [libvhdi_get_version], [], [NO_libvhdi=true])]
-    )]
-)
-AS_IF([test "x$ac_cv_lib_vhdi_libvhdi_get_version" = "xyes"], [ax_libvhdi=yes], [ax_libvhdi=no])
-
-dnl Check if we should link libvmdk.
-AC_ARG_WITH([libvmdk],
-    [AS_HELP_STRING([--without-libvmdk],[Do not use libvmdk even if it is installed])]
-    [AS_HELP_STRING([--with-libvmdk=dir],[Specify that libvmdk is installed in directory 'dir'])],
-    dnl If --with-libvmdk or --without-libvmdk is given
-    [],
-    dnl if nothing was specified, default to a test
-    [with_libvmdk=yes])
-
-dnl check for the lib if they did not specify no
-AS_IF([test "x$with_libvmdk" != "xno"],
-    dnl Test the dir if they specified something beyond yes/no
-    [AS_IF([test "x$with_libvmdk" != "xyes"],
-        [AS_IF([test -d ${with_libvmdk}/include],
-            [CPPFLAGS="$CPPFLAGS -I${with_libvmdk}/include"
-                LDFLAGS="$LDFLAGS -L${with_libvmdk}/lib"],
-            dnl Dir given was not correct
-            [AC_MSG_FAILURE([libvmdk directory not found at ${with_libvmdk}])])
-        ]
-    )]
-    dnl Check for the header file first to make sure they have the dev install
-    [AC_CHECK_HEADERS([libvmdk.h],
-      [AC_CHECK_LIB([vmdk], [libvmdk_get_version], [], [NO_libvmdk=true])]
-    )]
-)
-AS_IF([test "x$ac_cv_lib_vmdk_libvmdk_get_version" = "xyes"], [ax_libvmdk=yes], [ax_libvmdk=no])
-
 dnl Test for the various java things that we need for bindings
 AS_IF([test "x$enable_java" != "xno"], [
     dnl javac is needed to compile the JAR file
@@ -443,8 +365,10 @@ Building:
    libewf support:                        $ax_libewf
    zlib support:                          $ax_zlib
 
+   libbfio support:                       $ax_libbfio
    libvhdi support:                       $ax_libvhdi
    libvmdk support:                       $ax_libvmdk
+   libvslvm support:                      $ax_libvslvm
 Features:
    Java/JNI support:                      $ax_java_support
    Multithreading:                        $ax_multithread
diff --git a/db_diff/tskdbdiff.py b/db_diff/tskdbdiff.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3b874e9280be75a3e84a7f2b33b985ca7de8a4e
--- /dev/null
+++ b/db_diff/tskdbdiff.py
@@ -0,0 +1,1225 @@
+# Requires python3
+
+import re
+import sqlite3
+import subprocess
+import shutil
+import os
+import codecs
+import datetime
+import sys
+from typing import Callable, Dict, Union, List
+
+import psycopg2
+import psycopg2.extras
+import socket
+import csv
+
+class TskDbDiff(object):
+    """Compares two TSK/Autopsy SQLite databases.
+
+    Attributes:
+        gold_artifacts:
+        autopsy_artifacts:
+        gold_attributes:
+        autopsy_attributes:
+        gold_objects:
+        autopsy_objects:
+        artifact_comparison:
+        attribute_comparision:
+        report_errors: a listof_listof_String, the error messages that will be
+        printed to screen in the run_diff method
+        passed: a boolean, did the diff pass?
+        autopsy_db_file:
+        gold_db_file:
+    """
+    def __init__(self, output_db, gold_db, output_dir=None, gold_bb_dump=None, gold_dump=None, verbose=False, isMultiUser=False, pgSettings=None):
+        """Constructor for TskDbDiff.
+
+        Args:
+            output_db_path: path to output database (non-gold standard)
+            gold_db_path: path to gold database
+            output_dir: (optional) Path to folder where generated files will be put.
+            gold_bb_dump: (optional) path to file where the gold blackboard dump is located
+            gold_dump: (optional) path to file where the gold non-blackboard dump is located
+            verbose: (optional) a boolean, if true, diff results are sent to stdout. 
+        """
+
+        self.output_db_file = output_db
+        self.gold_db_file = gold_db
+        self.output_dir = output_dir
+        self.gold_bb_dump = gold_bb_dump
+        self.gold_dump = gold_dump
+        self._generate_gold_dump = False        
+        self._generate_gold_bb_dump = False
+        self._bb_dump_diff = ""
+        self._dump_diff = ""
+        self._bb_dump = ""
+        self._dump = ""
+        self.verbose = verbose
+        self.isMultiUser = isMultiUser
+        self.pgSettings = pgSettings
+
+        if self.isMultiUser and not self.pgSettings:
+            print("Missing PostgreSQL database connection settings data.")
+            sys.exit(1)
+
+        if self.gold_bb_dump is None:
+            self._generate_gold_bb_dump = True
+        if self.gold_dump is None:
+            self._generate_gold_dump = True
+
+    def run_diff(self):
+        """Compare the databases.
+
+        Raises:
+            TskDbDiffException: if an error occurs while diffing or dumping the database
+        """
+
+        self._init_diff()
+        id_obj_path_table = -1
+        # generate the gold database dumps if necessary     
+        if self._generate_gold_dump:       
+            id_obj_path_table = TskDbDiff._dump_output_db_nonbb(self.gold_db_file, self.gold_dump, self.isMultiUser, self.pgSettings)     
+        if self._generate_gold_bb_dump:        
+            TskDbDiff._dump_output_db_bb(self.gold_db_file, self.gold_bb_dump, self.isMultiUser, self.pgSettings, id_obj_path_table)
+
+        # generate the output database dumps (both DB and BB)
+        id_obj_path_table = TskDbDiff._dump_output_db_nonbb(self.output_db_file, self._dump, self.isMultiUser, self.pgSettings)
+        TskDbDiff._dump_output_db_bb(self.output_db_file, self._bb_dump, self.isMultiUser, self.pgSettings, id_obj_path_table)
+
+        # Compare non-BB
+        dump_diff_pass = self._diff(self._dump, self.gold_dump, self._dump_diff)
+
+        # Compare BB
+        bb_dump_diff_pass = self._diff(self._bb_dump, self.gold_bb_dump, self._bb_dump_diff)
+
+        self._cleanup_diff()
+        return dump_diff_pass, bb_dump_diff_pass
+
+
+    def _init_diff(self):
+        """Set up the necessary files based on the arguments given at construction"""
+        if self.output_dir is None:
+            # No stored files
+            self._bb_dump = TskDbDiff._get_tmp_file("BlackboardDump", ".txt")
+            self._bb_dump_diff = TskDbDiff._get_tmp_file("BlackboardDump-Diff", ".txt")
+            self._dump = TskDbDiff._get_tmp_file("DBDump", ".txt")
+            self._dump_diff = TskDbDiff._get_tmp_file("DBDump-Diff", ".txt")
+        else:
+            self._bb_dump = os.path.join(self.output_dir, "BlackboardDump.txt")
+            self._bb_dump_diff = os.path.join(self.output_dir, "BlackboardDump-Diff.txt")
+            self._dump = os.path.join(self.output_dir, "DBDump.txt")
+            self._dump_diff = os.path.join(self.output_dir, "DBDump-Diff.txt")
+
+        # Sorting gold before comparing (sort behaves differently in different environments)
+        new_bb = TskDbDiff._get_tmp_file("GoldBlackboardDump", ".txt")
+        new_db = TskDbDiff._get_tmp_file("GoldDBDump", ".txt")
+        if self.gold_bb_dump is not None:
+            srtcmdlst = ["sort", self.gold_bb_dump, "-o", new_bb]
+            subprocess.call(srtcmdlst)
+            srtcmdlst = ["sort", self.gold_dump, "-o", new_db]
+            subprocess.call(srtcmdlst)
+        self.gold_bb_dump = new_bb
+        self.gold_dump = new_db
+
+
+    def _cleanup_diff(self):
+        if self.output_dir is None:
+            #cleanup temp files
+            os.remove(self._dump)
+            os.remove(self._bb_dump)
+            if os.path.isfile(self._dump_diff):
+                os.remove(self._dump_diff)
+            if os.path.isfile(self._bb_dump_diff):
+                os.remove(self._bb_dump_diff)
+
+        if self.gold_bb_dump is None:
+            os.remove(self.gold_bb_dump)
+            os.remove(self.gold_dump)
+
+
+    def _diff(self, output_file, gold_file, diff_path):
+        """Compare two text files.
+
+        Args:
+            output_file: a pathto_File, the latest text file
+            gold_file: a pathto_File, the gold text file
+            diff_path: The file to write the differences to
+        Returns False if different
+        """
+
+        if (not os.path.isfile(output_file)):
+            return False
+
+        if (not os.path.isfile(gold_file)):
+            return False
+
+        # It is faster to read the contents in and directly compare
+        output_data = codecs.open(output_file, "r", "utf_8").read()
+        gold_data = codecs.open(gold_file, "r", "utf_8").read()
+        if (gold_data == output_data):
+            return True
+
+        # If they are different, invoke 'diff'
+        diff_file = codecs.open(diff_path, "wb", "utf_8")
+        # Gold needs to be passed in as 1st arg and output as 2nd
+        dffcmdlst = ["diff", gold_file, output_file]
+        subprocess.call(dffcmdlst, stdout = diff_file)
+
+        # create file path for gold files inside output folder. In case of diff, both gold and current run files
+        # are available in the report output folder. Prefix Gold- is added to the filename.
+        gold_file_in_output_dir = os.path.join(os.path.dirname(output_file), "Gold-" + os.path.basename(output_file))
+        shutil.copy(gold_file, gold_file_in_output_dir)
+
+        return False
+
+
+    @staticmethod
+    def _get_associated_artifact_type(cur, artifact_id, isMultiUser):
+        if isMultiUser:
+            cur.execute(
+                "SELECT tsk_files.parent_path, blackboard_artifact_types.display_name FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id WHERE artifact_id=%s",
+                [artifact_id])
+        else:
+            cur.execute(
+                "SELECT tsk_files.parent_path, blackboard_artifact_types.display_name FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id WHERE artifact_id=?",
+                [artifact_id])
+
+        info = cur.fetchone()
+
+        return "File path: " + info[0] + " Artifact Type: " + info[1]
+
+
    @staticmethod
    def _dump_output_db_bb(db_file, bb_dump_file, isMultiUser, pgSettings, id_obj_path_table):
        """Dumps sorted text results to the given output location.

        Smart method that deals with a blackboard comparison to avoid issues
        with different IDs based on when artifacts were created.

        Args:
            db_file: a pathto_File, the output database.
            bb_dump_file: a pathto_File, the sorted dump file to write to
            isMultiUser: True when the case database is PostgreSQL (multi-user),
                False for a SQLite single-user case.
            pgSettings: PGSettings with PostgreSQL connection details (used
                only when isMultiUser is True).
            id_obj_path_table: dict mapping object ids to path strings, or -1
                when unavailable; used to normalize TSK_PATH_ID (type id 36)
                attribute values from volatile object ids to paths.

        Raises:
            TskDbDiffException: on attribute extraction errors or IO errors
                while writing the dump.
        """

        # Write the unsorted dump to a temp file, then shell out to `sort` at the end.
        unsorted_dump = TskDbDiff._get_tmp_file("dump_data", ".txt")
        if isMultiUser:
            conn, unused_db = db_connect(db_file, isMultiUser, pgSettings)
            artifact_cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        else: # Use Sqlite
            conn = sqlite3.connect(db_file)
            # tolerate non-UTF8 bytes in the database rather than raising
            conn.text_factory = lambda x: x.decode("utf-8", "ignore")
            conn.row_factory = sqlite3.Row
            artifact_cursor = conn.cursor()
        # Get the list of all artifacts (along with type and associated file)
        # @@@ Could add a SORT by parent_path in here since that is how we are going to later sort it.
        artifact_cursor.execute("SELECT tsk_files.parent_path, tsk_files.name, blackboard_artifact_types.display_name, blackboard_artifacts.artifact_id FROM blackboard_artifact_types INNER JOIN blackboard_artifacts ON blackboard_artifact_types.artifact_type_id = blackboard_artifacts.artifact_type_id INNER JOIN tsk_files ON tsk_files.obj_id = blackboard_artifacts.obj_id")
        database_log = codecs.open(unsorted_dump, "wb", "utf_8")
        row = artifact_cursor.fetchone()
        # NOTE(review): appnd and counter are never used below
        appnd = False
        counter = 0
        artifact_count = 0
        artifact_fail = 0

        # Cycle through artifacts
        try:
            while (row != None):

                # File Name and artifact type
                # Remove parent object ID from Unalloc file name
                normalizedName = re.sub('^Unalloc_[0-9]+_', 'Unalloc_', row["name"])
                if(row["parent_path"] != None):
                    database_log.write(row["parent_path"] + normalizedName + ' <artifact type="' + row["display_name"] + '" > ')
                else:
                    database_log.write(normalizedName + ' <artifact type="' + row["display_name"] + '" > ')

                # A second cursor is needed so the outer artifact iteration is not disturbed.
                if isMultiUser:
                    attribute_cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
                else:
                    attribute_cursor = conn.cursor()
                looptry = True
                artifact_count += 1
                try:
                    art_id = ""
                    art_id = str(row["artifact_id"])

                    # Get attributes for this artifact
                    if isMultiUser:
                        attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attributes.attribute_type_id, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id = %s ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])
                    else:
                        attribute_cursor.execute("SELECT blackboard_attributes.source, blackboard_attributes.attribute_type_id, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double FROM blackboard_attributes INNER JOIN blackboard_attribute_types ON blackboard_attributes.attribute_type_id = blackboard_attribute_types.attribute_type_id WHERE artifact_id =? ORDER BY blackboard_attributes.source, blackboard_attribute_types.display_name, blackboard_attributes.value_type, blackboard_attributes.value_text, blackboard_attributes.value_int32, blackboard_attributes.value_int64, blackboard_attributes.value_double", [art_id])

                    attributes = attribute_cursor.fetchall()

                    # Print attributes
                    if (len(attributes) == 0):
                       # @@@@ This should be </artifact> 
                       database_log.write(' <artifact/>\n')
                       row = artifact_cursor.fetchone()
                       continue

                    src = attributes[0][0]
                    for attr in attributes:
                        # NOTE(review): indices 3-5 are value_type/value_text/value_int32
                        # in the SELECT above; if the intent was to count the value_*
                        # columns this should probably be range(4, 8) — confirm.
                        numvals = 0
                        for x in range(3, 6):
                            if(attr[x] != None):
                                numvals += 1
                        # msg is overwritten, not raised or written to the log, so these
                        # anomaly checks are currently informational only
                        if(numvals > 1):
                            msg = "There were too many values for attribute type: " + attr["display_name"] + " for artifact with id #" + str(row["artifact_id"]) + ".\n"

                        if(not attr["source"] == src):
                            msg = "There were inconsistent sources for artifact with id #" + str(row["artifact_id"]) + ".\n"

                        try:
                            # Render the attribute value according to its declared value_type.
                            attr_value_as_string = None
                            if attr["value_type"] == 0:
                                attr_value_as_string = str(attr["value_text"])                        
                            elif attr["value_type"] == 1:
                                attr_value_as_string = str(attr["value_int32"])                        
                            elif attr["value_type"] == 2:
                                attr_value_as_string = str(attr["value_int64"])
                                if attr["attribute_type_id"]  == 36 and id_obj_path_table != -1 and int(attr_value_as_string) > 0: #normalize positive TSK_PATH_IDs from being object id to a path if the obj_id_path_table was generated
                                    attr_value_as_string = id_obj_path_table[int(attr_value_as_string)]
                            elif attr["value_type"] == 3:
                                attr_value_as_string = "%20.10f" % float((attr["value_double"])) #use exact format from db schema to avoid python auto format double value to (0E-10) scientific style                       
                            elif attr["value_type"] == 4:
                                attr_value_as_string = "bytes"                        
                            elif attr["value_type"] == 5:
                                attr_value_as_string = str(attr["value_int64"])
                            elif attr["value_type"] == 6:
                                attr_value_as_string = str(attr["value_text"])
                            if attr["display_name"] == "Associated Artifact":
                                attr_value_as_string = TskDbDiff._get_associated_artifact_type(attribute_cursor, attr_value_as_string, isMultiUser)
                            # collapse control characters so each artifact stays on one line
                            patrn = re.compile("[\n\0\a\b\r\f]")
                            if attr_value_as_string is None:
                                print(f'Could not determine attribute value for value type: {attr["value_type"]}, display name: {attr["display_name"]}')

                            attr_value_as_string = re.sub(patrn, ' ', attr_value_as_string)
                            # keyword previews vary run to run, so replace with a fixed placeholder
                            if attr["source"] == "Keyword Search" and attr["display_name"] == "Keyword Preview":
                                attr_value_as_string = "<Keyword Preview placeholder>"
                            database_log.write('<attribute source="' + attr["source"] + '" type="' + attr["display_name"] + '" value="' + attr_value_as_string + '" />')
                        except IOError as e:
                            print("IO error")
                            raise TskDbDiffException("Unexpected IO error while writing to database log." + str(e))

                except sqlite3.Error as e:
                    msg = "Attributes in artifact id (in output DB)# " + str(row["artifact_id"]) + " encountered an error: " + str(e) +" .\n"
                    print("Attributes in artifact id (in output DB)# ", str(row["artifact_id"]), " encountered an error: ", str(e))
                    print() 
                    looptry = False
                    artifact_fail += 1
                    database_log.write('Error Extracting Attributes')
                    database_log.close()
                    raise TskDbDiffException(msg)
                finally:
                    attribute_cursor.close()

               
                # @@@@ This should be </artifact> 
                database_log.write(' <artifact/>\n')
                row = artifact_cursor.fetchone()

            # NOTE(review): this msg is assigned but never reported — confirm intent
            if(artifact_fail > 0):
                msg ="There were " + str(artifact_count) + " artifacts and " + str(artifact_fail) + " threw an exception while loading.\n"
        except Exception as e:
            raise TskDbDiffException("Unexpected error while dumping blackboard database: " + str(e))
        finally:
            database_log.close()
            artifact_cursor.close()
            conn.close()
        
        # Now sort the file
        srtcmdlst = ["sort", unsorted_dump, "-o", bb_dump_file]
        subprocess.call(srtcmdlst)
+
+    @staticmethod
+    def _dump_output_db_nonbb(db_file, dump_file, isMultiUser, pgSettings):
+        """Dumps a database to a text file.
+
+        Does not dump the artifact and attributes.
+
+        Args:
+            db_file: a pathto_File, the database file to dump
+            dump_file: a pathto_File, the location to dump the non-blackboard database items
+        """
+
+        conn, backup_db_file = db_connect(db_file, isMultiUser, pgSettings)
+        guid_utils = TskGuidUtils.create(conn)
+
+        if isMultiUser:
+            table_cols = get_pg_table_columns(conn)
+            schema = get_pg_schema(db_file, pgSettings.username, pgSettings.password,
+                                   pgSettings.pgHost, pgSettings.pgPort)
+        else:
+            table_cols = get_sqlite_table_columns(conn)
+            schema = get_sqlite_schema(conn)
+
+        with codecs.open(dump_file, "wb", "utf_8") as output_file:
+            output_file.write(schema + "\n")
+            for table, cols in sorted(table_cols.items(), key=lambda pr: pr[0]):
+                normalizer = TABLE_NORMALIZATIONS[table] if table in TABLE_NORMALIZATIONS else None
+                write_normalized(guid_utils, output_file, conn, table, cols, normalizer)
+
+        # Now sort the file
+        srtcmdlst = ["sort", dump_file, "-o", dump_file]
+        subprocess.call(srtcmdlst)
+
+        conn.close()
+        # cleanup the backup
+        # if backup_db_file:
+        #    os.remove(backup_db_file)
+        return guid_utils.obj_id_guids
+
+    @staticmethod
+    def dump_output_db(db_file, dump_file, bb_dump_file, isMultiUser, pgSettings):
+        """Dumps the given database to text files for later comparison.
+
+        Args:
+            db_file: a pathto_File, the database file to dump
+            dump_file: a pathto_File, the location to dump the non-blackboard database items
+            bb_dump_file: a pathto_File, the location to dump the blackboard database items
+        """
+        id_obj_path_table = TskDbDiff._dump_output_db_nonbb(db_file, dump_file, isMultiUser, pgSettings)
+        TskDbDiff._dump_output_db_bb(db_file, bb_dump_file, isMultiUser, pgSettings, id_obj_path_table)
+
+    @staticmethod
+    def _get_tmp_file(base, ext):
+        time = datetime.datetime.now().time().strftime("%H%M%f")
+        return os.path.join(os.environ['TMP'], base + time + ext)
+
+
class TskDbDiffException(Exception):
    """Raised when a database dump/diff cannot be completed (e.g. blackboard attribute extraction or IO errors)."""
    pass
+
class PGSettings(object):
    """Holds connection settings for a PostgreSQL (multi-user) case database."""

    def __init__(self, pgHost=None, pgPort=5432, user=None, password=None):
        """Stores the host, port, and credentials used to connect to PostgreSQL."""
        self.pgHost = pgHost
        self.pgPort = pgPort
        self.username = user
        self.password = password

    def get_pgHost(self):
        """Returns the configured PostgreSQL host."""
        return self.pgHost

    def get_pgPort(self):
        """Returns the configured PostgreSQL port."""
        return self.pgPort

    def get_username(self):
        """Returns the configured PostgreSQL user name."""
        return self.username

    def get_password(self):
        """Returns the configured PostgreSQL password."""
        return self.password
+
+
class TskGuidUtils:
    """
    Provides stable guid strings for data whose raw ids are volatile from run
    to run (object ids, artifact ids, account ids).
    """

    @staticmethod
    def _get_guid_dict(db_conn, select_statement, delim="", normalizer: Union[Callable[[str], str], None] = None):
        """
        Retrieves a dictionary mapping the first item selected to a concatenation of the remaining values.
        Args:
            db_conn: The database connection.
            select_statement: The select statement.
            delim: The delimiter for how row data from index 1 to end shall be concatenated.
            normalizer: Means of normalizing the generated string or None.

        Returns: A dictionary mapping the key (the first item in the select statement) to a concatenation of the remaining values.

        """
        cursor = db_conn.cursor()
        cursor.execute(select_statement)
        ret_dict = {}
        for row in cursor:
            # concatenate value columns with delimiter, filtering out any null values
            value_str = delim.join([str(col) for col in filter(lambda col: col is not None, row[1:])])
            if normalizer:
                value_str = normalizer(value_str)
            ret_dict[row[0]] = value_str

        return ret_dict

    @staticmethod
    def create(db_conn):
        """
        Creates an instance of this class by querying for relevant guid data.
        Args:
            db_conn: The database connection.

        Returns: The instance of this class.

        """
        # Build guid maps for each kind of object whose raw id is volatile.
        guid_files = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, parent_path, name FROM tsk_files",
                                                 normalizer=normalize_file_path)
        guid_vs_parts = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, addr, start FROM tsk_vs_parts", "_")
        guid_vs_info = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, vs_type, img_offset FROM tsk_vs_info", "_")
        guid_fs_info = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, img_offset, fs_type FROM tsk_fs_info", "_")
        guid_image_names = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, name FROM tsk_image_names "
                                                                "WHERE sequence=0",
                                                       normalizer=get_filename)
        guid_os_accounts = TskGuidUtils._get_guid_dict(db_conn, "SELECT os_account_obj_id, addr FROM tsk_os_accounts")
        guid_reports = TskGuidUtils._get_guid_dict(db_conn, "SELECT obj_id, path FROM reports",
                                                   normalizer=normalize_file_path)

        objid_artifacts = TskGuidUtils._get_guid_dict(db_conn,
                                                      "SELECT blackboard_artifacts.artifact_obj_id, "
                                                      "blackboard_artifact_types.type_name "
                                                      "FROM blackboard_artifacts "
                                                      "INNER JOIN blackboard_artifact_types "
                                                      "ON blackboard_artifact_types.artifact_type_id = "
                                                      "blackboard_artifacts.artifact_type_id")

        artifact_objid_artifacts = TskGuidUtils._get_guid_dict(db_conn,
                                                               "SELECT blackboard_artifacts.artifact_id, "
                                                               "blackboard_artifact_types.type_name "
                                                               "FROM blackboard_artifacts "
                                                               "INNER JOIN blackboard_artifact_types "
                                                               "ON blackboard_artifact_types.artifact_type_id = "
                                                               "blackboard_artifacts.artifact_type_id")

        # Map each object to its parent so artifacts can be keyed by parent path.
        cursor = db_conn.cursor()
        cursor.execute("SELECT obj_id, par_obj_id FROM tsk_objects")
        par_obj_objects = {row[0]: row[1] for row in cursor}
        cursor.close()

        guid_artifacts = {}
        for k, v in objid_artifacts.items():
            if k in par_obj_objects:
                par_obj_id = par_obj_objects[k]

                # check for artifact parent in files, images, reports
                path = ''
                for artifact_parent_dict in [guid_files, guid_image_names, guid_reports]:
                    if par_obj_id in artifact_parent_dict:
                        path = artifact_parent_dict[par_obj_id]
                        break

                guid_artifacts[k] = "/".join([path, v])

        return TskGuidUtils(
            # aggregate all the object id dictionaries together
            # (fixed: guid_fs_info was previously merged twice)
            obj_id_guids={**guid_files, **guid_reports, **guid_os_accounts, **guid_vs_parts, **guid_vs_info,
                          **guid_fs_info, **guid_image_names, **guid_artifacts},
            artifact_types=artifact_objid_artifacts)

    # artifact id -> artifact type name
    artifact_types: Dict[int, str]
    # object id -> guid string
    obj_id_guids: Dict[int, any]

    def __init__(self, obj_id_guids: Dict[int, any], artifact_types: Dict[int, str]):
        """
        Main constructor.
        Args:
            obj_id_guids: A dictionary mapping object ids to their guids.
            artifact_types: A dictionary mapping artifact ids to their types.
        """
        self.artifact_types = artifact_types
        self.obj_id_guids = obj_id_guids

    def get_guid_for_objid(self, obj_id, omitted_value: Union[str, None] = 'Object ID Omitted'):
        """
        Returns the guid for the specified object id or returns omitted value if the object id is not found.
        Args:
            obj_id: The object id.
            omitted_value: The value if no object id mapping is found.

        Returns: The relevant guid or the omitted_value.

        """
        return self.obj_id_guids[obj_id] if obj_id in self.obj_id_guids else omitted_value

    def get_guid_for_file_objid(self, obj_id, omitted_value: Union[str, None] = 'Object ID Omitted'):
        # this method is just an alias for get_guid_for_objid
        return self.get_guid_for_objid(obj_id, omitted_value)

    def get_guid_for_accountid(self, account_id, omitted_value: Union[str, None] = 'Account ID Omitted'):
        # this method is just an alias for get_guid_for_objid
        return self.get_guid_for_objid(account_id, omitted_value)

    def get_guid_for_artifactid(self, artifact_id, omitted_value: Union[str, None] = 'Artifact ID Omitted'):
        """
        Returns the guid for the specified artifact id or returns omitted value if the artifact id is not found.
        Args:
            artifact_id: The artifact id.
            omitted_value: The value if no object id mapping is found.

        Returns: The relevant guid or the omitted_value.
        """
        return self.artifact_types[artifact_id] if artifact_id in self.artifact_types else omitted_value
+
+
class NormalizeRow:
    """
    Wraps a masking function that rewrites a row dict (column name -> value)
    into a form whose values are stable from run to run.
    """
    row_masker: Callable[[TskGuidUtils, Dict[str, any]], Dict[str, any]]

    def __init__(self, row_masker: Callable[[TskGuidUtils, Dict[str, any]], Union[Dict[str, any], None]]):
        """
        Main constructor.
        Args:
            row_masker: The function invoked to mask a row; it may return None
                to indicate the row should be ignored.
        """
        self.row_masker = row_masker

    def normalize(self, guid_util: TskGuidUtils, row: Dict[str, any]) -> Union[Dict[str, any], None]:
        """
        Produces the normalized form of the given row by delegating to the
        configured masking function.
        Args:
            guid_util: The TskGuidUtils instance providing guids for volatile ids.
            row: The row values mapping column name to value.

        Returns: The normalized row or None if the row should be ignored.
        """
        return self.row_masker(guid_util, row)
+
+
class NormalizeColumns(NormalizeRow):
    """
    Normalizes individual column values of a row so volatile values do not
    change the dump from run to run.
    """

    @classmethod
    def _normalize_col_vals(cls,
                            col_mask: Dict[str, Union[any, Callable[[TskGuidUtils, any], any]]],
                            guid_util: TskGuidUtils,
                            row: Dict[str, any]):
        """
        Applies each column rule in col_mask to a copy of the row.
        Args:
            col_mask: Maps a column name either to a fixed replacement value or
            to a function (guid_util, original_value) -> replacement value.
            guid_util: The TskGuidUtil used to provide guids for volatile values.
            row: The dictionary representing the row mapping column names to values.

        Returns: The new row representation.

        """
        masked = row.copy()
        for col_name, replacement in col_mask.items():
            # ignore rules for columns this row does not have
            if col_name not in masked:
                continue
            if isinstance(replacement, Callable):
                # replacement value is computed from the original value
                masked[col_name] = replacement(guid_util, row[col_name])
            else:
                masked[col_name] = replacement

        return masked

    def __init__(self, col_mask: Dict[str, Union[any, Callable[[any], any]]]):
        super().__init__(lambda guid_util, row: NormalizeColumns._normalize_col_vals(col_mask, guid_util, row))
+
+
def get_path_segs(path: Union[str, None]) -> Union[List[str], None]:
    """
    Splits a path string into its folder/file segments.
    Args:
        path: The path string or None.

    Returns: The list of non-empty path segments, or None for an empty/None path.

    """
    if not path:
        return None
    # accept either separator style and drop empty or whitespace-only pieces
    return [seg for seg in re.split(r"[\\/]", path) if seg.strip()]
+
+
def get_filename(path: Union[str, None]) -> Union[str, None]:
    """
    Returns the final segment (file name) of a path.
    Args:
        path: The path.

    Returns: The last path segment, or None when the path has no segments.

    """
    if not path:
        return None
    # split on either separator and keep only non-blank segments
    segments = [seg for seg in re.split(r"[\\/]", path) if seg.strip()]
    return segments[-1] if segments else None
+
+
def index_of(lst, search_item) -> int:
    """
    Returns the index of the item in the list or -1.
    Args:
        lst: The list (or other iterable), possibly None.
        search_item: The item to search for.

    Returns: The index of the first equal item, or -1 when absent or lst is None.

    """
    if lst is None:
        return -1

    try:
        return list(lst).index(search_item)
    except ValueError:
        return -1
+
+
def get_sql_insert_value(val) -> str:
    """
    Renders a Python value as it would appear in a sql insert statement
    (i.e. string becomes 'string', None becomes NULL).
    Args:
        val: The original value.

    Returns: The sql insert equivalent value.

    """
    if val is None:
        return "NULL"

    if not isinstance(val, str):
        return str(val)

    # make newlines visible and escape embedded single quotes
    return "'" + val.replace('\n', '\\n').replace("'", "''") + "'"
+
+
def get_sqlite_table_columns(conn) -> Dict[str, List[str]]:
    """
    Retrieves a dictionary mapping table names to a list of all the columns for that table
    where the columns are in ordinal value.

    Fixed: per-table cursors were previously never closed; all cursors are now
    closed via try/finally.

    Args:
        conn: The database connection.

    Returns: A dictionary of the form { table_name: [col_name1, col_name2...col_nameN] }

    """
    cur = conn.cursor()
    try:
        cur.execute("SELECT name FROM sqlite_master tables WHERE tables.type='table'")
        tables = [table[0] for table in cur.fetchall()]
    finally:
        cur.close()

    to_ret = {}
    for table in tables:
        col_cursor = conn.cursor()
        try:
            # pragma_table_info yields columns in ordinal (cid) order
            col_cursor.execute('SELECT name FROM pragma_table_info(?) ORDER BY cid', [table])
            to_ret[table] = [col[0] for col in col_cursor.fetchall()]
        finally:
            col_cursor.close()

    return to_ret
+
+
def get_pg_table_columns(conn) -> Dict[str, List[str]]:
    """
    Returns a dictionary mapping table names to the list of their columns in ordinal order.
    Args:
        conn: The pg database connection.

    Returns: The dictionary of tables mapped to a list of their ordinal-orderd column names.
    """
    cursor = conn.cursor()
    # restrict to tables in the public schema; rows arrive ordered by
    # (table_name, ordinal_position) so appends preserve column order
    cursor.execute("""
    SELECT cols.table_name, cols.column_name
      FROM information_schema.columns cols
      WHERE cols.column_name IS NOT NULL
      AND cols.table_name IS NOT NULL
      AND cols.table_name IN (
        SELECT tables.tablename FROM pg_catalog.pg_tables tables
        WHERE LOWER(schemaname) = 'public'
      )
    ORDER by cols.table_name, cols.ordinal_position;
    """)
    mapping = {}
    for table_name, column_name in cursor:
        mapping.setdefault(table_name, []).append(column_name)

    cursor.close()
    return mapping
+
+
def sanitize_schema(original: str) -> str:
    """
    Sanitizes sql script representing table/index creations.
    Args:
        original: The original sql schema creation script.

    Returns: The sanitized schema, one statement per line.
    """
    statements = []
    pending = ''
    for raw_line in original.splitlines():
        stripped = raw_line.strip('\r\n ')
        lowered = stripped.lower()
        # Skip blank lines, comments, and postgres catalog/session noise
        # (ALTER ... OWNER/OWNED, sequence defaults, SET statements, etc.).
        if (not stripped or
                stripped.startswith('--') or
                lowered.startswith('set') or
                " set default nextval" in lowered or
                " owner to " in lowered or
                " owned by " in lowered or
                "pg_catalog" in lowered or
                "idle_in_transaction_session_timeout" in lowered):
            continue

        # Join continuation lines with a single space unless whitespace or a
        # parenthesis already provides a delimiter at the boundary.
        if re.match(r'^.+?[^\s()]$', pending) and re.match(r'^[^\s()]', stripped):
            pending += ' '
        pending += stripped

        # A trailing ';' completes one statement, emitted as one output line.
        if stripped.endswith(';'):
            statements.append(pending)
            pending = ''

    # keep any trailing statement that lacked a terminating ';'
    if pending.strip():
        statements.append(pending)

    return "\n".join(statements)
+
+
def get_pg_schema(dbname: str, pg_username: str, pg_pword: str, pg_host: str, pg_port: Union[str, int]):
    """
    Gets the schema to be added to the dump text from the postgres database.
    Args:
        dbname: The name of the database.
        pg_username: The postgres user name.
        pg_pword: The postgres password.
        pg_host: The postgres host.
        pg_port: The postgres port.

    Returns: The normalized schema.

    """
    # Pass the password via a copy of the environment for the child process
    # only, instead of mutating os.environ — the original code left the
    # credential in this process's environment for the rest of the run.
    env = dict(os.environ, PGPASSWORD=pg_pword)
    pg_dump = ["pg_dump", "-U", pg_username, "-h", pg_host, "-p", str(pg_port),
               "--schema-only", "-d", dbname, "-t", "public.*"]
    output = subprocess.check_output(pg_dump, env=env)
    output_str = output.decode('UTF-8')
    return sanitize_schema(output_str)
+
+
def get_sqlite_schema(db_conn):
    """
    Gets the schema to be added to the dump text from the sqlite database.
    Args:
        db_conn: The database connection.

    Returns: The normalized schema.

    """
    cursor = db_conn.cursor()
    # fetch table and index creation statements, tables first
    cursor.execute(
        "SELECT sql FROM sqlite_master "
        "WHERE type IN ('table', 'index') AND sql IS NOT NULL "
        "ORDER BY type DESC, tbl_name ASC")
    statements = [str(row[0]) + ';' for row in cursor]
    return sanitize_schema('\n'.join(statements))
+
+
+def _mask_event_desc(desc: str) -> str:
+    """
+    Masks dynamic event descriptions of the form "<artifact_type_name>:<artifact id>" so the artifact id is no longer
+    present.
+    Args:
+        desc: The original description.
+
+    Returns: The normalized description.
+
+    """
+
+    # Takes a string like "Shell Bags: 30840" and replaces with "ShellBags:<artifact_id>"
+    match = re.search(r"^\s*(.+?)\s*:\s*\d+\s*$", desc.strip())
+    if match:
+        return f"{match.group(1)}:<artifact_id>"
+
+    return desc
+
+
def normalize_tsk_event_descriptions(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[str, any]:
    """
    Normalizes event description rows masking possibly changing column values.
    Args:
        guid_util: Provides guids for ids that may change from run to run.
        row: A dictionary mapping column names to values.

    Returns: The normalized event description row.
    """
    normalized = row.copy()
    # replace volatile ids with deterministic stand-ins
    normalized['event_description_id'] = MASKED_ID
    normalized['content_obj_id'] = guid_util.get_guid_for_file_objid(row['content_obj_id'])
    artifact_id = row['artifact_id']
    normalized['artifact_id'] = guid_util.get_guid_for_artifactid(artifact_id) if artifact_id is not None else None
    normalized['data_source_obj_id'] = guid_util.get_guid_for_file_objid(row['data_source_obj_id'])

    # only mask the descriptions when all three agree
    if row['full_description'] == row['med_description'] == row['short_description']:
        for key in ('full_description', 'med_description', 'short_description'):
            normalized[key] = _mask_event_desc(row[key])

    return normalized
+
+
def normalize_ingest_jobs(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[str, any]:
    """
    Normalizes ingest jobs table rows.
    Args:
        guid_util: Provides guids for ids that may change from run to run.
        row: A dictionary mapping column names to values.

    Returns: The normalized ingest job row.

    """
    normalized = row.copy()
    normalized['host_name'] = "{host_name}"

    # Mask the timestamps only when they are sanely ordered, so an inverted
    # start/end range still shows up in the diff.
    if row['start_date_time'] <= row['end_date_time']:
        normalized['start_date_time'] = MASKED_TIME
        normalized['end_date_time'] = MASKED_TIME

    return normalized
+
+
def normalize_unalloc_files(path_str: Union[str, None]) -> Union[str, None]:
    """
    Normalizes a path string removing volatile object ids from unalloc file names.
    Args:
        path_str: The original path string.

    Returns: The path string with the object id removed from unalloc names.

    """
    if path_str is None:
        return None
    # "Unalloc_30580_7466496_2980941312" -> "Unalloc_7466496_2980941312"
    return re.sub('Unalloc_[0-9]+_', 'Unalloc_', path_str)
+
+
def normalize_regripper_files(path_str: Union[str, None]) -> Union[str, None]:
    """
    Normalizes a path string removing volatile ids from regripper file names.
    Args:
        path_str: The original path string.

    Returns: The path string with the id removed from regripper paths.

    """
    if path_str is None:
        return None
    # "regripper-12345-full" -> "regripper-full"
    return re.sub(r'regripper-[0-9]+-full', 'regripper-full', path_str)
+
+
+def normalize_file_path(path_str: Union[str, None]) -> Union[str, None]:
+    """
+    Normalizes file paths, removing or replacing pieces that will change from run to run (e.g. object ids)
+    Args:
+        path_str: The original path string.
+
+    Returns: The normalized path string
+    """
+    return normalize_unalloc_files(normalize_regripper_files(path_str))
+
+
+def normalize_tsk_files(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[str, any]:
+    """
+    Normalizes files table rows.
+    Args:
+        guid_util: Provides guids for ids that may change from run to run.
+        row: A dictionary mapping column names to values.
+
+    Returns: The normalized files table row.
+
+    """
+    # Ignore TIFF size and hash if extracted from PDFs.
+    # See JIRA-6951 for more details.
+    row_copy = row.copy()
+    if row['extension'] is not None and row['extension'].strip().lower() == 'tif' and \
+            row['parent_path'] is not None and row['parent_path'].strip().lower().endswith('.pdf/'):
+        row_copy['size'] = "SIZE_IGNORED"
+        row_copy['md5'] = "MD5_IGNORED"
+        row_copy['sha256'] = "SHA256_IGNORED"
+
+    row_copy['data_source_obj_id'] = guid_util.get_guid_for_file_objid(row['data_source_obj_id'])
+    row_copy['obj_id'] = MASKED_OBJ_ID
+    row_copy['os_account_obj_id'] = 'MASKED_OS_ACCOUNT_OBJ_ID'
+    row_copy['parent_path'] = normalize_file_path(row['parent_path'])
+    row_copy['name'] = normalize_file_path(row['name'])
+    return row_copy
+
+
+def normalize_tsk_files_path(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[str, any]:
+    """
+    Normalizes file path table rows.
+    Args:
+        guid_util: Provides guids for ids that may change from run to run.
+        row: A dictionary mapping column names to values.
+
+    Returns: The normalized file path table row.
+    """
+    row_copy = row.copy()
+    path = row['path']
+    if path is not None:
+        path_parts = get_path_segs(path)
+        module_output_idx = index_of(path_parts, 'ModuleOutput')
+        if module_output_idx >= 0:
+            # remove everything up to and including ModuleOutput if ModuleOutput present
+            path_parts = path_parts[module_output_idx:]
+            if len(path_parts) > 2 and path_parts[1] == 'EFE':
+                # for embedded file extractor, the next folder is the object id and should be omitted
+                del path_parts[2]
+
+        row_copy['path'] = os.path.join(*path_parts) if len(path_parts) > 0 else '/'
+
+    row_copy['obj_id'] = guid_util.get_guid_for_file_objid(row['obj_id'])
+    return row_copy
+
+
+def normalize_tsk_objects_path(guid_util: TskGuidUtils, objid: int,
+                               no_path_placeholder: Union[str, None]) -> Union[str, None]:
+    """
+    Returns a normalized path to be used in a tsk_objects table row.
+    Args:
+        guid_util: The utility for fetching guids.
+        objid: The object id of the item.
+        no_path_placeholder: text to return if no path value found.
+
+    Returns: The 'no_path_placeholder' text if no path.  Otherwise, the normalized path.
+
+    """
+    path = guid_util.get_guid_for_objid(objid, omitted_value=None)
+
+    if path is None:
+        return no_path_placeholder
+    else:
+        # remove host name (for multi-user) and dates/times from path for reports
+        path_parts = get_path_segs(path)
+        module_output_idx = index_of(path_parts, 'ModuleOutput')
+        if module_output_idx >= 0:
+            # remove everything up to and including ModuleOutput if ModuleOutput present
+            path_parts = path_parts[module_output_idx:]
+
+            if "BulkExtractor" in path_parts or "Smirk" in path_parts:
+                # chop off the last folder (which contains a date/time)
+                path_parts = path_parts[:-1]
+
+        if path_parts and len(path_parts) >= 2:
+            is_leapp = False
+            for leapp_module in ['aleapp', 'ileapp']:
+                if len(path_parts) > 0 and path_parts[1].lower() == leapp_module and \
+                        path_parts[-1].lower() == 'index.html':
+                    path_parts = ['ModuleOutput', leapp_module, 'index.html']
+                    is_leapp = True
+                    break
+            if not is_leapp:
+                for idx in range(0, len(path_parts) - 1):
+                    if path_parts[idx].lower() == "reports" and \
+                            path_parts[idx + 1].lower().startswith("autopsytestcase html report"):
+                        path_parts = ["Reports", "AutopsyTestCase HTML Report"]
+                        break
+                    if path_parts[idx].lower() == "reports" and \
+                            "html report" in path_parts[idx + 1].lower() and \
+                            len(path_parts) > idx + 2 and \
+                            path_parts[idx + 2].lower().endswith("report.html"):
+                        path_parts = ["Reports", "html-report.html"]
+                        break
+
+
+        path = os.path.join(*path_parts) if (path_parts is not None and len(path_parts) > 0) else '/'
+
+        return path
+
+
+def normalize_tsk_objects(guid_util: TskGuidUtils, row: Dict[str, any]) -> Dict[str, any]:
+    """
+    Normalizes object table rows.
+    Args:
+        guid_util: Provides guids for ids that may change from run to run.
+        row: A dictionary mapping column names to values.
+
+    Returns: The normalized object table row.
+    """
+    row_copy = row.copy()
+    row_copy['obj_id'] = None if row['obj_id'] is None else \
+        normalize_tsk_objects_path(guid_util, row['obj_id'], MASKED_OBJ_ID)
+
+    row_copy['par_obj_id'] = None if row['par_obj_id'] is None else \
+        normalize_tsk_objects_path(guid_util, row['par_obj_id'], 'MASKED_PARENT_OBJ_ID')
+
+    return row_copy
+
+
+MASKED_TIME = "MASKED_TIME"
+MASKED_OBJ_ID = "MASKED_OBJ_ID"
+MASKED_ID = "MASKED_ID"
+
+IGNORE_TABLE = "IGNORE_TABLE"
+
+TableNormalization = Union[IGNORE_TABLE, NormalizeRow]
+
+"""
+This dictionary maps tables where data should be specially handled to how they should be handled.
+"""
+TABLE_NORMALIZATIONS: Dict[str, TableNormalization] = {
+    "blackboard_artifacts": IGNORE_TABLE,
+    "blackboard_attributes": IGNORE_TABLE,
+    "data_source_info": NormalizeColumns({
+        "device_id": "{device id}",
+        "added_date_time": "{dateTime}"
+    }),
+    "image_gallery_groups": NormalizeColumns({
+        "group_id": MASKED_ID,
+        "data_source_obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value=None),
+    }),
+    "image_gallery_groups_seen": IGNORE_TABLE,
+    "ingest_jobs": NormalizeRow(normalize_ingest_jobs),
+    "reports": NormalizeColumns({
+        "obj_id": MASKED_OBJ_ID,
+        "path": "AutopsyTestCase",
+        "crtime": MASKED_TIME
+    }),
+    "tsk_aggregate_score": NormalizeColumns({
+       "obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value="Object ID Omitted"),
+       "data_source_obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value="Data Source Object ID Omitted"),
+    }),
+    "tsk_analysis_results": NormalizeColumns({
+        "artifact_obj_id":
+            lambda guid_util, col: guid_util.get_guid_for_objid(col, omitted_value="Artifact Object ID Omitted"),
+    }),
+    "tsk_data_artifacts": NormalizeColumns({
+        "artifact_obj_id":
+            lambda guid_util, col: guid_util.get_guid_for_file_objid(col, omitted_value="Artifact Object ID Omitted"),
+        "os_account_obj_id":
+            lambda guid_util, col: guid_util.get_guid_for_file_objid(col, omitted_value="Account Object ID Omitted"),
+    }),
+    "tsk_event_descriptions": NormalizeRow(normalize_tsk_event_descriptions),
+    "tsk_events": NormalizeColumns({
+        "event_id": "MASKED_EVENT_ID",
+        "event_description_id": 'ID OMITTED'
+    }),
+    "tsk_examiners": NormalizeColumns({
+        "login_name": "{examiner_name}"
+    }),
+    "tsk_files": NormalizeRow(normalize_tsk_files),
+    "tsk_file_layout": NormalizeColumns({
+        "obj_id": lambda guid_util, col: guid_util.get_guid_for_file_objid(col)
+    }),
+    "tsk_files_path": NormalizeRow(normalize_tsk_files_path),
+    "tsk_image_names": NormalizeColumns({
+       "name": lambda guid_util, col: get_filename(col)
+    }),
+    "tsk_objects": NormalizeRow(normalize_tsk_objects),
+    "tsk_os_account_attributes": NormalizeColumns({
+        "id": MASKED_ID,
+        "os_account_obj_id": lambda guid_util, col: guid_util.get_guid_for_accountid(col),
+        "source_obj_id": lambda guid_util, col: guid_util.get_guid_for_objid(col)
+    }),
+    "tsk_os_account_instances": NormalizeColumns({
+        "id": MASKED_ID,
+        "os_account_obj_id": lambda guid_util, col: guid_util.get_guid_for_accountid(col)
+    }),
+    "tsk_os_accounts": NormalizeColumns({
+        "os_account_obj_id": MASKED_OBJ_ID
+    }),
+    "tsk_vs_parts": NormalizeColumns({
+        "obj_id": MASKED_OBJ_ID
+    })
+}
+
+
+def write_normalized(guid_utils: TskGuidUtils, output_file, db_conn, table: str, column_names: List[str],
+                     normalizer: Union[TableNormalization, None] = None):
+    """
+    Outputs rows of a database table as their normalized values (values that should not change from run to run).
+    Args:
+        guid_utils: Provides guids to replace values that would potentially change from run to run.
+        output_file: The file where the normalized dump will be written.
+        db_conn: The database connection.
+        table: The name of the table.
+        column_names: The names of the columns in the table in ordinal order.
+        normalizer: The normalizer (if any) to use so that data is properly normalized.
+    """
+    if normalizer == IGNORE_TABLE:
+        return
+
+    cursor = db_conn.cursor()
+
+    joined_columns = ",".join([col for col in column_names])
+    cursor.execute(f"SELECT {joined_columns} FROM {table}")
+    for row in cursor:
+        if len(row) != len(column_names):
+            print(
+                f"ERROR: in {table}, number of columns retrieved: {len(row)} but columns are"
+                f" {len(column_names)} with {str(column_names)}")
+            continue
+
+        row_dict = {}
+        for col_idx in range(0, len(column_names)):
+            row_dict[column_names[col_idx]] = row[col_idx]
+
+        if normalizer and isinstance(normalizer, NormalizeRow):
+            row_masker: NormalizeRow = normalizer
+            row_dict = row_masker.normalize(guid_utils, row_dict)
+
+        if row_dict is not None:
+            # show row as json-like value
+            entries = []
+            for column in column_names:
+                dict_value = row_dict[column] if column in row_dict and row_dict[column] is not None else None
+                value = get_sql_insert_value(dict_value)
+                if value is not None:
+                    entries.append((column, value))
+            insert_values = ", ".join([f"{pr[0]}: {pr[1]}" for pr in entries])
+            insert_statement = f"{table}: {{{insert_values}}}\n"
+            output_file.write(insert_statement)
+
+
+def db_connect(db_file, is_multi_user, pg_settings=None):
+    if is_multi_user:  # use PostgreSQL
+        try:
+            return psycopg2.connect("dbname=" + db_file + " user=" + pg_settings.username + " host=" +
+                                    pg_settings.pgHost + " password=" + pg_settings.password), None
+        except:
+            print("Failed to connect to the database: " + db_file)
+    else:  # Sqlite
+        # Make a copy that we can modify
+        backup_db_file = TskDbDiff._get_tmp_file("tsk_backup_db", ".db")
+        shutil.copy(db_file, backup_db_file)
+        # We sometimes get situations with messed up permissions
+        os.chmod(backup_db_file, 0o777)
+        return sqlite3.connect(backup_db_file), backup_db_file
+
+
+def main():
+    try:
+        sys.argv.pop(0)
+        output_db = sys.argv.pop(0)
+        gold_db = sys.argv.pop(0)
+    except:
+        print("usage: tskdbdiff [OUTPUT DB PATH] [GOLD DB PATH]")
+        sys.exit(1)
+
+    db_diff = TskDbDiff(output_db, gold_db, output_dir=".")
+    dump_passed, bb_dump_passed = db_diff.run_diff()
+
+    if dump_passed and bb_dump_passed:
+        print("Database comparison passed.")
+        sys.exit(0)
+    if not dump_passed:
+        print("Non blackboard database comparison failed.")
+    if not bb_dump_passed:
+        print("Blackboard database comparison failed.")
+
+    sys.exit(2)
+
+
+if __name__ == "__main__":
+    if sys.hexversion < 0x03000000:
+        print("Python 3 required")
+        sys.exit(1)
+
+    main()
diff --git a/debian/changelog b/debian/changelog
index d58b9047b8e9944e308a07e50856ca5b570c3f4f..37552b6276e410f3b824c25a9f41671c7f39e295 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,4 +1,4 @@
-sleuthkit-java (4.11.0-1) unstable; urgency=medium
+sleuthkit-java (4.12.1-1) unstable; urgency=medium
 
   * Initial release (Closes: #nnnn)  <nnnn is the bug number of your ITP>
 
diff --git a/debian/sleuthkit-java.install b/debian/sleuthkit-java.install
index 1a26f0ab860346c33c0d5501e5453b5f1b9649c0..f8e918ee58204c14ef434f1f7511fa0e70453169 100644
--- a/debian/sleuthkit-java.install
+++ b/debian/sleuthkit-java.install
@@ -1,4 +1,4 @@
-bindings/java/lib/sqlite-jdbc-3.25.2.jar /usr/share/java
-bindings/java/dist/sleuthkit-4.11.0.jar /usr/share/java
-case-uco/java/dist/sleuthkit-caseuco-4.11.0.jar /usr/share/java
+bindings/java/lib/sqlite-jdbc-3.42.0.0.jar /usr/share/java
+bindings/java/dist/sleuthkit-4.12.1.jar /usr/share/java
+case-uco/java/dist/sleuthkit-caseuco-4.12.1.jar /usr/share/java
 
diff --git a/m4/ax_pkg_check_modules.m4 b/m4/ax_pkg_check_modules.m4
new file mode 100644
index 0000000000000000000000000000000000000000..f3af0f684d1322f312798cb36ca7b309f8b036ab
--- /dev/null
+++ b/m4/ax_pkg_check_modules.m4
@@ -0,0 +1,69 @@
+# ===========================================================================
+#   http://www.gnu.org/software/autoconf-archive/ax_pkg_check_modules.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+#   AX_PKG_CHECK_MODULES(PREFIX, PUBLIC-MODULES, PRIVATE-MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [PUBLIC-VARIABLE], [PRIVATE-VARIABLE])
+#
+# DESCRIPTION
+#
+#   A wrapper around PKG_CHECK_MODULES which splits the list of modules into
+#   public and private dependencies, and produces two variables listing the
+#   dependencies across all invocations of AX_PKG_CHECK_MODULES. These two
+#   variables are exposed via AC_SUBST, and should be used in a pkg-config
+#   file as the substituted values for Requires and Requires.private.
+#
+#   The PREFIX, PUBLIC-MODULES and PRIVATE-MODULES arguments should be
+#   specified as for PKG_CHECK_MODULES, with the concatenation of
+#   PUBLIC-MODULES and PRIVATE-MODULES equaling the LIST-OF-MODULES from
+#   PKG_CHECK_MODULES.  The ACTION-IF-FOUND and ACTION-IF-NOT-FOUND
+#   arguments are optional, and should also be specified as for
+#   PKG_CHECK_MODULES.  ACTION-IF-FOUND is evaluated if the full
+#   LIST-OF-MODULES is found; ACTION-IF-NOT-FOUND similarly.
+#
+#   PUBLIC-VARIABLE defaults to AX_PACKAGE_REQUIRES, and PRIVATE-VARIABLE
+#   defaults to AX_PACKAGE_REQUIRES_PRIVATE.  Both variables are AC_SUBST-ed
+#   by this macro.
+#
+#   For example:
+#
+#     AX_PKG_CHECK_MODULES([GLIB],[glib-2.0 gio-2.0],[gthread-2.0])
+#     AX_PKG_CHECK_MODULES([DBUS],[],[dbus-glib-1 >= 0.98 dbus-1])
+#
+#   results in the substitutions:
+#
+#     AX_PACKAGE_REQUIRES="glib-2.0 gio-2.0"
+#     AX_PACKAGE_REQUIRES_PRIVATE="gthread-2.0 dbus-glib-1 >= 0.98 dbus-1"
+#
+#   and can be used with a template pkg-config file (.pc.in) using:
+#
+#     Requires: @AX_PACKAGE_REQUIRES@
+#     Requires.private: @AX_PACKAGE_REQUIRES_PRIVATE@
+#
+# LICENSE
+#
+#   Copyright (c) 2014 Philip Withnall <philip@tecnocode.co.uk>
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved.  This file is offered as-is, without any
+#   warranty.
+
+#serial 2
+
+AC_DEFUN([AX_PKG_CHECK_MODULES],[
+    m4_define([ax_package_requires],
+              [m4_default_quoted([$6],[AX_PACKAGE_REQUIRES])])
+    m4_define([ax_package_requires_private],
+              [m4_default_quoted([$7],[AX_PACKAGE_REQUIRES_PRIVATE])])
+
+    ax_package_requires="$[]ax_package_requires $2"
+    ax_package_requires_private="$[]ax_package_requires_private $3"
+
+    PKG_CHECK_MODULES([$1],[$2 $3],[$4],[$5])
+
+    # Substitute output.
+    AC_SUBST(ax_package_requires)
+    AC_SUBST(ax_package_requires_private)
+])dnl AX_PKG_CHECK_MODULES
diff --git a/m4/tsk_opt_dep_check.m4 b/m4/tsk_opt_dep_check.m4
new file mode 100644
index 0000000000000000000000000000000000000000..2e5333bfb56dcdc15cbd57a7c3af80ad1eb90adc
--- /dev/null
+++ b/m4/tsk_opt_dep_check.m4
@@ -0,0 +1,140 @@
+#
+# Check if pkg-config is installed and set up variables used for producing
+# the tsk.pc.
+#
+# This MUST be run before any of the other macros in this file.
+#
+AC_DEFUN([TSK_CHECK_PROG_PKGCONFIG], [
+  AC_CHECK_PROG([PKGCONFIG], [pkg-config], [yes], [no])
+  AS_IF([test "x$ac_cv_prog_PKGCONFIG" = "xyes"], [
+    m4_ifdef([PKG_PROG_PKG_CONFIG], [PKG_PROG_PKG_CONFIG], [])
+    dnl Ask for static libs during static linking
+    AS_IF([test "x$enable_shared" != "xyes"], [PKG_CONFIG="$PKG_CONFIG --static"])
+  ])
+
+  PACKAGE_LIBS_PRIVATE=
+  AC_SUBST([PACKAGE_LIBS_PRIVATE])
+])
+
+#
+# Call AX_PKG_CHECK_MODULES only if PKG_CHECK_MODULES is defined, i.e.,
+# only if we have the pkg-config macros; otherwise make it a no-op
+#
+AC_DEFUN([TSK_PKG_CHECK_MODULES], [
+  m4_ifdef([PKG_CHECK_MODULES],
+           [AX_PKG_CHECK_MODULES([$1], [$2], [$3], [$4], [$5], [$6], [$7])])
+])
+
+#
+# Check for optional dependencies.
+#
+# TSK_OPT_DEP_CHECK(DISPLAY_NAME, PKG_VAR, PKG_MODULE, HEADER_LIST, CHECK_LIB_NAME, CHECK_LIB_FUNC)
+#
+# DISPLAY_NAME is the name of the library shown by 'configure --help'
+#
+# PKG_VAR is the prefix used for variables associated with the particular
+# dependency. Each dependency may have its own CPPFLAGS, CFLAGS, CXXFLAGS,
+# and LIBS variables. E.g., "FOO" would have FOO_CPPFLAGS, FOO_CFLAGS, etc.
+#
+# PKG_MODULE is the name of the library to be checked by pkg-config.
+#
+# HEADER_LIST is a list of header files to be checked by AC_CHECK_HEADERS.
+#
+# CHECK_LIB_NAME is the name of the library to be checked by AC_CHECK_LIB.
+#
+# CHECK_LIB_FUNC is the name of the function to be checked by AC_CHECK_LIB.
+#
+# If the library is found, ax_DISPLAY_NAME will be set to 'yes'; otherwise
+# to 'no'.
+#
+AC_DEFUN([TSK_OPT_DEP_CHECK], [
+  dnl Check if we should link lib
+  AC_ARG_WITH(
+    [$1],
+    [AS_HELP_STRING([--without-$1],[Do not use $1 even if it is installed])]
+    [AS_HELP_STRING([--with-$1=dir],[Specify that $1 is installed in directory 'dir'])],
+    dnl If --with-lib or --without-lib is given
+    [],
+    dnl if nothing was specified, default to a test
+    [with_$1=yes]
+  )
+
+  dnl check for lib if they did not specify no
+  ax_$1=no
+  AS_IF(
+    [test "x[$]with_$1" != "xno"],
+    [
+      dnl Save flags so we can reset them if the library isn't found
+      SAVED_CPPFLAGS="$CPPFLAGS"
+      SAVED_CFLAGS="$CFLAGS"
+      SAVED_CXXFLAGS="$CXXFLAGS"
+      SAVED_LDFLAGS="$LDFLAGS"
+      SAVED_LIBS="$LIBS"
+
+      AS_IF([test "x[$]with_$1" = "xyes"],
+        [
+          dnl Check for lib using pkg-config, if we have it
+          m4_ifval([$2], [AS_IF([test "x$ac_cv_prog_PKGCONFIG" = "xyes"],
+            [
+              SAVED_AX_PACKAGE_REQUIRES="$AX_PACKAGE_REQUIRES"
+              SAVED_AX_PACKAGE_REQUIRES_PRIVATE="$AX_PACKAGE_REQUIRES_PRIVATE"
+              TSK_PKG_CHECK_MODULES([$2], [], [$3],
+              [
+                CPPFLAGS="$CPPFLAGS [$]$2[]_CFLAGS"
+                CFLAGS="$CFLAGS [$]$2[]_CFLAGS"
+                CXXFLAGS="$CXXFLAGS [$]$2[]_CFLAGS"
+                LIBS="$LIBS [$]$2[]_LIBS"
+                ax_$1=yes
+              ],
+              [
+                AX_PACKAGE_REQUIRES="$SAVED_AX_PACKAGE_REQUIRES"
+                AX_PACKAGE_REQUIRES_PRIVATE="$SAVED_AX_PACKAGE_REQUIRES_PRIVATE"
+                ax_$1=no
+              ]
+            )]
+          )])
+        ],
+        [
+          dnl A directory was given; check that it exists
+          AS_IF([test -d "[$]with_$1/include"],
+            [
+              CPPFLAGS="$CPPFLAGS -I[$]with_$1/include"
+              LDFLAGS="$LDFLAGS -L[$]with_$1/lib"
+            ],
+            [AC_MSG_FAILURE([$1 directory not found at [$]with_$1])]
+          )
+        ]
+      )
+
+      dnl Check if the library is usable
+      AC_CHECK_HEADERS([$4], [AC_CHECK_LIB([$5], [$6])])
+      AS_IF([test "x[$]ac_cv_lib_$5[]_$6" = "xyes"],
+        [
+          dnl Library found and usable
+          AS_IF([test "x[$]ax_$1" = "xyes"],
+            [
+              dnl Library found with pkg-config; reset CPPFLAGS so as not
+              dnl to duplicate flags pkg-config puts into CFLAGS
+              CPPFLAGS="$SAVED_CPPFLAGS"
+            ],
+            [
+              ax_$1=yes
+              dnl Library found without pkg-config; ensure that it is added
+              dnl to Libs.private in tsk.pc
+              PACKAGE_LIBS_PRIVATE="$PACKAGE_LIBS_PRIVATE -l$5"
+            ]
+          )
+        ],
+        [
+          dnl Library not found or unusable; reset flags
+          CPPFLAGS="$SAVED_CPPFLAGS"
+          CFLAGS="$SAVED_CFLAGS"
+          CXXFLAGS="$SAVED_CXXFLAGS"
+          LDFLAGS="$SAVED_LDFLAGS"
+          LIBS="$SAVED_LIBS"
+          ax_$1=no
+        ]
+      )
+    ]
+  )
+])
diff --git a/packages/sleuthkit.spec b/packages/sleuthkit.spec
index 7c18c0e2ac5a916ebb72a01421c0e189ecf3c9c8..33d0f73a2705cc9da41cdb5728b272052a8c2b5e 100644
--- a/packages/sleuthkit.spec
+++ b/packages/sleuthkit.spec
@@ -1,5 +1,5 @@
 Name:		sleuthkit	
-Version:	4.11.0
+Version:	4.12.1
 Release:	1%{?dist}
 Summary:	The Sleuth Kit (TSK) is a library and collection of command line tools that allow you to investigate volume and file system data.	
 
diff --git a/rejistry++/msvcpp/Rejistry/Rejistry.vcxproj b/rejistry++/msvcpp/Rejistry/Rejistry.vcxproj
index 46bbe2f02fdecba2e6e2135b3f3df9f15369ccdd..322f7fa32426fef4fdc8c4e4a3146ec13225cd77 100644
--- a/rejistry++/msvcpp/Rejistry/Rejistry.vcxproj
+++ b/rejistry++/msvcpp/Rejistry/Rejistry.vcxproj
@@ -45,7 +45,7 @@
     <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>true</UseDebugLibraries>
     <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>Windows7.1SDK</PlatformToolset>
+    <PlatformToolset>v140_xp</PlatformToolset>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
@@ -66,14 +66,14 @@
     <UseDebugLibraries>false</UseDebugLibraries>
     <WholeProgramOptimization>true</WholeProgramOptimization>
     <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>Windows7.1SDK</PlatformToolset>
+    <PlatformToolset>v140_xp</PlatformToolset>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release_NoLibs|x64'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>false</UseDebugLibraries>
     <WholeProgramOptimization>true</WholeProgramOptimization>
     <CharacterSet>Unicode</CharacterSet>
-    <PlatformToolset>Windows7.1SDK</PlatformToolset>
+    <PlatformToolset>v140_xp</PlatformToolset>
   </PropertyGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
   <ImportGroup Label="ExtensionSettings">
@@ -105,7 +105,7 @@
     </ClCompile>
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>rejistry++.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
@@ -116,7 +116,7 @@
     </ClCompile>
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>rejistry++.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\x64\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(Platform)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
@@ -131,7 +131,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>rejistry++.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
@@ -147,7 +147,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>rejistry++.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
@@ -162,7 +162,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>rejistry++.lib;</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\x64\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(Platform)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
@@ -177,7 +177,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>rejistry++.lib;</AdditionalDependencies>
+      <AdditionalDependencies>rejistry++.lib;$(TSK_HOME)\win32\x64\$(Configuration)\libtsk.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>$(SolutionDir)\$(Platform)\$(ConfigurationName);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
diff --git a/rejistry++/src/RegistryByteBuffer.cpp b/rejistry++/src/RegistryByteBuffer.cpp
index d297c09c3fb7e2f37726641e897552bce3a8f39b..00e616bf3cf649d1f22ded2ad88da635065ce897 100644
--- a/rejistry++/src/RegistryByteBuffer.cpp
+++ b/rejistry++/src/RegistryByteBuffer.cpp
@@ -35,6 +35,8 @@
 // Local includes
 #include "RegistryByteBuffer.h"
 #include "RejistryException.h"
+#include "../../tsk/base/tsk_base.h"
+#include "../../tsk/base/tsk_unicode.h"
 
 namespace Rejistry {
 
@@ -130,30 +132,12 @@ namespace Rejistry {
 			return L"";
 		}
 
-		// We do this so we can reference the last character in the string
-		// data.size() -2. if we didn't add a char to the string then returned
-		// string would be missing the last character. 
-		data.push_back('\0');
-		data.push_back('\0');
-
-		// We are unsure how from_bytes() works. Microsofts docs seem to indicate that the second pointer
-		// should point to the last character which will be included in the conversion.[1] However, another
-		// reference indicates that the data pointed to by the second pointer will not be included, which is 
-		// what our testing has shown.[2] We previously had the second pointer point to data.size() but there were
-		// concerns that we were pointing to memory we did not own. As a result, we add a char to the end of every
-		// string so we can use data.size() - 2 and still get the original string back.  
-		// 1. https://docs.microsoft.com/en-us/cpp/standard-library/wstring-convert-class?view=vs-2017#from_bytes
-		// 2. http://www.cplusplus.com/reference/locale/wstring_convert/from_bytes/
-		std::wstring result;
-		try {
-			result = conv.from_bytes(reinterpret_cast<const char*>(&data[0]), reinterpret_cast<const char*>(&data[data.size()-2]));
-		}
-		catch (std::exception&)
-		{
-			throw RegistryParseException("Error: Failed to convert string");
-		}
+		size_t numOfWchars = data.size() / sizeof(wchar_t);
+
+		// Sanitize data to ensure its valid UTF16 (CT-4851)
+		tsk_cleanupUTF16(TSK_LIT_ENDIAN, (wchar_t*)(&data[0]), numOfWchars, L'\uFFFD');
 
-		return result;
+		return std::wstring((wchar_t*)(&data[0]), numOfWchars);
 	}
 
     ByteBuffer::ByteArray RegistryByteBuffer::getData() const {
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 8921e125815af1e56bc84a92a3a4c3653996a379..b0590939feed42af837222a5d96b4708b4ffb0f1 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -3,7 +3,7 @@ AM_CFLAGS += $(PTHREAD_CFLAGS)
 AM_CXXFLAGS += -Wno-unused-command-line-argument $(PTHREAD_CFLAGS)
 LDADD = ../tsk/libtsk.la
 LDFLAGS += -static $(PTHREAD_LIBS)
-EXTRA_DIST = .indent.pro runtests.sh
+EXTRA_DIST = .indent.pro runtests.sh test_libraries.sh
 
 check_SCRIPTS = runtests.sh test_libraries.sh
 
diff --git a/tests/fs_thread_test.cpp b/tests/fs_thread_test.cpp
index 117e36e1e9dccf869a8554216ef6a34ee5750a30..c3b1aa7fe3cb915a10036108741450391212857d 100644
--- a/tests/fs_thread_test.cpp
+++ b/tests/fs_thread_test.cpp
@@ -154,7 +154,7 @@ static const TSK_TCHAR *progname;
 static void
 usage()
 {
-    TFPRINTF(stderr, _TSK_T("Usage: %s [-f fstype ] [-o imgoffset ] [-v] image nthreads niters\n"), progname);
+    TFPRINTF(stderr, _TSK_T("Usage: %" PRIttocTSK " [-f fstype ] [-o imgoffset ] [-v] image nthreads niters\n"), progname);
 
     exit(1);
 }
@@ -188,7 +188,7 @@ main(int argc, char** argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                         _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                         _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
diff --git a/tools/autotools/tsk_comparedir.cpp b/tools/autotools/tsk_comparedir.cpp
index 64be43c05031a460d41af2846b3fe47a42b06d52..88bc9835bdcf4f445ef82274c18d975a8bff9aab 100644
--- a/tools/autotools/tsk_comparedir.cpp
+++ b/tools/autotools/tsk_comparedir.cpp
@@ -37,7 +37,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-f fstype] [-i imgtype] [-b dev_sector_size] [-o sector_offset] [-P pooltype] [-B pool_volume_block] [-n start_inum] [-vV] image [image] comparison_directory\n"),
+        ("usage: %" PRIttocTSK " [-f fstype] [-i imgtype] [-b dev_sector_size] [-o sector_offset] [-P pooltype] [-B pool_volume_block] [-n start_inum] [-vV] image [image] comparison_directory\n"),
         progname);
 
     tsk_fprintf(stderr,
@@ -99,13 +99,13 @@ uint8_t
     hFind = FindFirstFile((LPCWSTR) fullpath, &ffd);
     DWORD err = GetLastError();
     if (hFind == INVALID_HANDLE_VALUE) {
-        fprintf(stderr, "Error opening directory: %S\n", fullpath);
+        fprintf(stderr, "Error opening directory: %ls\n", fullpath);
 
         wchar_t message[64];
         FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
             FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, 0,
             (LPWSTR) & message, 64, NULL);
-        fprintf(stderr, "error: %S", message);
+        fprintf(stderr, "error: %ls", message);
         return 1;
     }
 
@@ -371,7 +371,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
 
@@ -380,7 +380,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                          _TSK_T
-                         ("invalid argument: sector size must be positive: %s\n"),
+                         ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                          OPTARG);
                 usage();
             }
@@ -394,7 +394,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                         _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                         _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -407,7 +407,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                          OPTARG);
                 usage();
             }
diff --git a/tools/autotools/tsk_gettimes.cpp b/tools/autotools/tsk_gettimes.cpp
index 3686ecad541b14af9ab02dffe7fd612364051542..86fc8392345ff1ecab17109fdbd50f9c463dd352 100644
--- a/tools/autotools/tsk_gettimes.cpp
+++ b/tools/autotools/tsk_gettimes.cpp
@@ -21,7 +21,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-vVm] [-i imgtype] [-b dev_sector_size] [-z zone] [-s seconds] image [image]\n"),
+        ("usage: %" PRIttocTSK " [-vVm] [-i imgtype] [-b dev_sector_size] [-z zone] [-s seconds] image [image]\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-i imgtype: The format of the image file (use '-i list' for supported types)\n");
@@ -172,7 +172,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
 
@@ -182,7 +182,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -197,7 +197,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/autotools/tsk_loaddb.cpp b/tools/autotools/tsk_loaddb.cpp
index b59506dc00128c977c674527731deb8e6370c689..011b0d3cc9570ddf8ce32aca78153b9542be88a9 100644
--- a/tools/autotools/tsk_loaddb.cpp
+++ b/tools/autotools/tsk_loaddb.cpp
@@ -20,7 +20,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-ahkvV] [-i imgtype] [-b dev_sector_size] [-d database] [-z ZONE] image [image]\n"),
+        ("usage: %" PRIttocTSK " [-ahkvV] [-i imgtype] [-b dev_sector_size] [-d database] [-z ZONE] image [image]\n"),
         progname);
     tsk_fprintf(stderr, "\t-a: Add image to existing database, instead of creating a new one (requires -d to specify database)\n");
     tsk_fprintf(stderr, "\t-k: Don't create block data table\n");
@@ -72,7 +72,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
 
@@ -85,7 +85,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -98,7 +98,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -184,7 +184,7 @@ main(int argc, char **argv1)
         tsk_error_print(stderr);
         exit(1);
     }
-    TFPRINTF(stdout, _TSK_T("Database stored at: %s\n"), database);
+    TFPRINTF(stdout, _TSK_T("Database stored at: %" PRIttocTSK "\n"), database);
 
     autoDb->closeImage();
     delete tskCase;
diff --git a/tools/autotools/tsk_recover.cpp b/tools/autotools/tsk_recover.cpp
index 9558cb273852acc90c52a0283cef348284c633a8..c56edc819dbd08b87bd6ad74e59228fcb6c84bf2 100755
--- a/tools/autotools/tsk_recover.cpp
+++ b/tools/autotools/tsk_recover.cpp
@@ -21,7 +21,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-vVae] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o sector_offset] [-P pooltype] [-B pool_volume_block] [-d dir_inum] image [image] output_dir\n"),
+        ("usage: %" PRIttocTSK " [-vVae] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o sector_offset] [-P pooltype] [-B pool_volume_block] [-d dir_inum] image [image] output_dir\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-i imgtype: The format of the image file (use '-i list' for supported types)\n");
@@ -181,7 +181,7 @@ uint8_t TskRecover::writeFile(TSK_FS_FILE * a_fs_file, const char *a_path)
                 result = CreateDirectoryW((LPCTSTR) path16full, NULL);
             if (result == FALSE) {
                 if (GetLastError() == ERROR_PATH_NOT_FOUND) {
-                    fprintf(stderr, "Error Creating Directory (%S)", path16full);
+                    fprintf(stderr, "Error Creating Directory (%ls)", path16full);
                     return 1;
                 }
             }
@@ -227,14 +227,14 @@ uint8_t TskRecover::writeFile(TSK_FS_FILE * a_fs_file, const char *a_path)
         CreateFileW((LPCTSTR) path16full, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS,
         FILE_ATTRIBUTE_NORMAL, NULL);
     if (handle == INVALID_HANDLE_VALUE) {
-        fprintf(stderr, "Error Creating File (%S)", path16full);
+        fprintf(stderr, "Error Creating File (%ls)", path16full);
         return 1;
     }
 
     //try to write to the file
     if (tsk_fs_file_walk(a_fs_file, (TSK_FS_FILE_WALK_FLAG_ENUM) 0,
             file_walk_cb, handle)) {
-        fprintf(stderr, "Error writing file %S\n", path16full);
+        fprintf(stderr, "Error writing file %ls\n", path16full);
         tsk_error_print(stderr);
         CloseHandle(handle);
         return 1;
@@ -442,7 +442,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
 
@@ -455,7 +455,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -464,7 +464,7 @@ main(int argc, char **argv1)
         case _TSK_T('d'):
             if (tsk_fs_parse_inum(OPTARG, &dirInum, NULL, NULL, NULL, NULL)) {
                 TFPRINTF(stderr,
-                        _TSK_T("invalid argument for directory inode: %s\n"),
+                        _TSK_T("invalid argument for directory inode: %" PRIttocTSK "\n"),
                         OPTARG);
                 usage();
             }
@@ -484,7 +484,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                         _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                         _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -497,7 +497,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/fstools/blkcalc.cpp b/tools/fstools/blkcalc.cpp
index 9307d79a1735625109c5d5b2511a0e4db603f146..6d345c1a1d5aa36a568eaa30f071ea8148ab25ab 100644
--- a/tools/fstools/blkcalc.cpp
+++ b/tools/fstools/blkcalc.cpp
@@ -31,7 +31,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-dsu unit_addr] [-vV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images]\n"),
+        ("usage: %" PRIttocTSK " [-dsu unit_addr] [-vV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images]\n"),
         progname);
     tsk_fprintf(stderr, "Slowly calculates the opposite block number\n");
     tsk_fprintf(stderr, "\tOne of the following must be given:\n");
@@ -103,7 +103,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
 
@@ -112,7 +112,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -122,7 +122,7 @@ main(int argc, char **argv1)
             type |= TSK_FS_BLKCALC_DD;
             count = TSTRTOULL(OPTARG, &cp, 0);
             if (*cp || *cp == *OPTARG) {
-                TFPRINTF(stderr, _TSK_T("Invalid address: %s\n"), OPTARG);
+                TFPRINTF(stderr, _TSK_T("Invalid address: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             set = 1;
@@ -136,7 +136,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -148,7 +148,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -185,7 +185,7 @@ main(int argc, char **argv1)
             type |= TSK_FS_BLKCALC_SLACK;
             count = TSTRTOULL(OPTARG, &cp, 0);
             if (*cp || *cp == *OPTARG) {
-                TFPRINTF(stderr, _TSK_T("Invalid address: %s\n"), OPTARG);
+                TFPRINTF(stderr, _TSK_T("Invalid address: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             set = 1;
@@ -195,7 +195,7 @@ main(int argc, char **argv1)
             type |= TSK_FS_BLKCALC_BLKLS;
             count = TSTRTOULL(OPTARG, &cp, 0);
             if (*cp || *cp == *OPTARG) {
-                TFPRINTF(stderr, _TSK_T("Invalid address: %s\n"), OPTARG);
+                TFPRINTF(stderr, _TSK_T("Invalid address: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             set = 1;
@@ -248,7 +248,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -258,7 +258,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -267,20 +267,20 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
 
     if (-1 == tsk_fs_blkcalc(fs, (TSK_FS_BLKCALC_FLAG_ENUM) type, count)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
 
     exit(0);
 }
diff --git a/tools/fstools/blkcat.cpp b/tools/fstools/blkcat.cpp
index 609e58a25412194bf6babcb39be72d00c4de70fa..886556e0eebf7149e5e867e548acef1cfb868ad8 100644
--- a/tools/fstools/blkcat.cpp
+++ b/tools/fstools/blkcat.cpp
@@ -32,7 +32,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-ahsvVw] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-u usize] image [images] unit_addr [num]\n"),
+        ("usage: %" PRIttocTSK " [-ahsvVw] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-u usize] image [images] unit_addr [num]\n"),
         progname);
     tsk_fprintf(stderr, "\t-a: displays in all ASCII \n");
     tsk_fprintf(stderr, "\t-h: displays in hexdump-like fashion\n");
@@ -111,7 +111,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -131,7 +131,7 @@ main(int argc, char **argv1)
             }
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -145,7 +145,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -180,7 +180,7 @@ main(int argc, char **argv1)
         case _TSK_T('u'):
             usize = TSTRTOUL(OPTARG, &cp, 0);
             if (*cp || cp == OPTARG) {
-                TFPRINTF(stderr, _TSK_T("Invalid block size: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid block size: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -197,7 +197,7 @@ main(int argc, char **argv1)
             break;
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         }
@@ -254,7 +254,7 @@ main(int argc, char **argv1)
             /* Not a number, so it is the image name and we do not have a length */
             addr = TSTRTOULL(argv[argc - 1], &cp, 0);
             if (*cp || *cp == *argv[argc - 1]) {
-                TFPRINTF(stderr, _TSK_T("Invalid block address: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid block address: %" PRIttocTSK "\n"),
                     argv[argc - 1]);
                 usage();
             }
@@ -277,7 +277,7 @@ main(int argc, char **argv1)
             /* We got a number, so take the length as well while we are at it */
             read_num_units = TSTRTOULL(argv[argc - 1], &cp, 0);
             if (*cp || *cp == *argv[argc - 1]) {
-                TFPRINTF(stderr, _TSK_T("Invalid size: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid size: %" PRIttocTSK "\n"),
                     argv[argc - 1]);
                 usage();
             }
@@ -309,7 +309,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -319,7 +319,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -328,7 +328,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -366,29 +366,29 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Data unit address too large for image (%" PRIuDADDR ")\n",
             fs->last_block);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
     if (addr < fs->first_block) {
         tsk_fprintf(stderr,
             "Data unit address too small for image (%" PRIuDADDR ")\n",
             fs->first_block);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
     if (tsk_fs_blkcat(fs, (TSK_FS_BLKCAT_FLAG_ENUM) format, addr,
             read_num_units)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
 
     exit(0);
 }
diff --git a/tools/fstools/blkls.cpp b/tools/fstools/blkls.cpp
index bf8aa2e367587164dac2854d05bab99caf5ab404..2b628d0d29d2f1c6735fc81e06e6dabfa078f99d 100644
--- a/tools/fstools/blkls.cpp
+++ b/tools/fstools/blkls.cpp
@@ -35,7 +35,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-aAelvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] [start-stop]\n"),
+        ("usage: %" PRIttocTSK " [-aAelvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] [start-stop]\n"),
         progname);
     tsk_fprintf(stderr, "\t-e: every block (including file system metadata blocks)\n");
     tsk_fprintf(stderr,
@@ -112,7 +112,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('a'):
@@ -128,7 +128,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -144,7 +144,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -155,7 +155,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -234,7 +234,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_fs_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
         }
@@ -244,7 +244,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_pool_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
 
@@ -253,7 +253,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_fs_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
         }
@@ -350,7 +350,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_fs_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
         }
@@ -360,7 +360,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_pool_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
 
@@ -369,7 +369,7 @@ main(int argc, char **argv1)
                 tsk_error_print(stderr);
                 if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                     tsk_fs_type_print(stderr);
-                img->close(img);
+                tsk_img_close(img);
                 exit(1);
             }
         }
@@ -391,12 +391,12 @@ main(int argc, char **argv1)
     if (tsk_fs_blkls(fs, (TSK_FS_BLKLS_FLAG_ENUM) lclflags, bstart, blast,
             (TSK_FS_BLOCK_WALK_FLAG_ENUM)flags)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/blkstat.cpp b/tools/fstools/blkstat.cpp
index f816ee164dfcf257b747be451843598598e541c7..743dc68977f1398d984249c3629d6d514a410d08 100644
--- a/tools/fstools/blkstat.cpp
+++ b/tools/fstools/blkstat.cpp
@@ -24,7 +24,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-vV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] addr\n"),
+        ("usage: %" PRIttocTSK " [-vV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] addr\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-f fstype: File system type (use '-f list' for supported types)\n");
@@ -88,7 +88,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -101,7 +101,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -112,7 +112,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -149,7 +149,7 @@ main(int argc, char **argv1)
             exit(0);
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         }
@@ -186,7 +186,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -196,7 +196,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -205,7 +205,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -215,28 +215,28 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Data unit address too large for image (%" PRIuDADDR ")\n",
             fs->last_block);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
     if (addr < fs->first_block) {
         tsk_fprintf(stderr,
             "Data unit address too small for image (%" PRIuDADDR ")\n",
             fs->first_block);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
 
     if (tsk_fs_blkstat(fs, addr)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/fcat.cpp b/tools/fstools/fcat.cpp
index 0e7e72b7b0e345c5fef4e158c1ce12944c0d7cc0..05e62e899d80eb6811f460fb4b3034ac98ac47c1 100644
--- a/tools/fstools/fcat.cpp
+++ b/tools/fstools/fcat.cpp
@@ -20,7 +20,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-hRsvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] file_path image [images]\n"),
+        ("usage: %" PRIttocTSK " [-hRsvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] file_path image [images]\n"),
         progname);
     tsk_fprintf(stderr, "\t-h: Do not display holes in sparse files\n");
     tsk_fprintf(stderr,
@@ -87,7 +87,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -95,7 +95,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -108,7 +108,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -122,7 +122,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -200,7 +200,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -210,7 +210,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -219,7 +219,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -227,15 +227,15 @@ main(int argc, char **argv1)
 
     if (-1 == (retval = tsk_fs_ifind_path(fs, path, &inum))) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         free(path);
         exit(1);
     }
     else if (retval == 1) {
         tsk_fprintf(stderr, "File not found\n");
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         free(path);
         exit(1);
     }
@@ -252,13 +252,13 @@ main(int argc, char **argv1)
         }
         else {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/ffind.cpp b/tools/fstools/ffind.cpp
index d9565bc4e2413e60768491ea91d1c1a342258e52..a129da8af46f1e311f4a24d821b51b34bf50f4e9 100644
--- a/tools/fstools/ffind.cpp
+++ b/tools/fstools/ffind.cpp
@@ -28,7 +28,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-aduvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] inode\n"),
+        ("usage: %" PRIttocTSK " [-aduvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] image [images] inode\n"),
         progname);
     tsk_fprintf(stderr, "\t-a: Find all occurrences\n");
     tsk_fprintf(stderr, "\t-d: Find deleted entries ONLY\n");
@@ -102,7 +102,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -118,7 +118,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -129,7 +129,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -169,7 +169,7 @@ main(int argc, char **argv1)
             exit(0);
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         }
@@ -193,7 +193,7 @@ main(int argc, char **argv1)
     /* Get the inode */
     if (tsk_fs_parse_inum(argv[argc - 1], &inode, &type, &type_used, &id,
             &id_used)) {
-        TFPRINTF(stderr, _TSK_T("Invalid inode: %s\n"), argv[argc - 1]);
+        TFPRINTF(stderr, _TSK_T("Invalid inode: %" PRIttocTSK "\n"), argv[argc - 1]);
         usage();
     }
 
@@ -217,7 +217,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -227,7 +227,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -236,7 +236,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -258,12 +258,12 @@ main(int argc, char **argv1)
             type_used, id, id_used,
             (TSK_FS_DIR_WALK_FLAG_ENUM) dir_walk_flags)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/fls.cpp b/tools/fstools/fls.cpp
index fc0b91d837189c5b61fe90757843cacec16bb7e9..94ff76d784021c633bae78f22926c8ef5434a0e4 100644
--- a/tools/fstools/fls.cpp
+++ b/tools/fstools/fls.cpp
@@ -31,7 +31,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-adDFlhpruvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-m dir/] [-o imgoffset] [-z ZONE] [-s seconds] image [images] [inode]\n"),
+        ("usage: %" PRIttocTSK " [-adDFlhpruvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-m dir/] [-o imgoffset] [-z ZONE] [-s seconds] image [images] [inode]\n"),
         progname);
     tsk_fprintf(stderr,
         "\tIf [inode] is not given, the root directory is used\n");
@@ -122,7 +122,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('a'):
@@ -133,7 +133,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -153,7 +153,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -168,7 +168,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -197,7 +197,7 @@ main(int argc, char **argv1)
             pooltype = tsk_pool_type_toid(OPTARG);
             if (pooltype == TSK_POOL_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported pool container type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported pool container type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -410,7 +410,7 @@ main(int argc, char **argv1)
     if (tsk_fs_fls(fs, (TSK_FS_FLS_FLAG_ENUM) fls_flags, inode,
             (TSK_FS_DIR_WALK_FLAG_ENUM) name_flags, macpre, sec_skew)) {
         tsk_error_print(stderr);
-        fs->close(fs);
+        tsk_fs_close(fs);
         tsk_img_close(img);
         if (pool != NULL) {
           tsk_pool_close(pool);
@@ -421,7 +421,7 @@ main(int argc, char **argv1)
         exit(1);
     }
 
-    fs->close(fs);
+    tsk_fs_close(fs);
     tsk_img_close(img);
 
     if (pool != NULL) {
diff --git a/tools/fstools/fscheck.cpp b/tools/fstools/fscheck.cpp
index 77d5cf623d42630074889bb02270ccc0872dac8e..2c54746c50f1b8289fd7d479049de29b598f6d80 100644
--- a/tools/fstools/fscheck.cpp
+++ b/tools/fstools/fscheck.cpp
@@ -70,7 +70,7 @@ main(int argc, char **argv)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -83,7 +83,7 @@ main(int argc, char **argv)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -95,7 +95,7 @@ main(int argc, char **argv)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -137,20 +137,20 @@ main(int argc, char **argv)
             tsk_print_types(stderr);
 
         tsk_error_print(stderr);
-        img->close(img);
+        tsk_img_close(img);
         exit(1);
 
     }
 
     if (fs->fscheck(fs, stdout)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
 
     exit(0);
 }
diff --git a/tools/fstools/fsstat.cpp b/tools/fstools/fsstat.cpp
index ca6a6a01494b42c8158f055e93589c95131b51c3..ee802e4fb56b4a3abe6967f92fe9fd70a1b881db 100644
--- a/tools/fstools/fsstat.cpp
+++ b/tools/fstools/fsstat.cpp
@@ -22,7 +22,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-tvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image\n"),
+        ("usage: %" PRIttocTSK " [-tvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image\n"),
         progname);
     tsk_fprintf(stderr, "\t-t: display type only\n");
     tsk_fprintf(stderr,
@@ -83,7 +83,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -91,7 +91,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -104,7 +104,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -116,7 +116,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -141,7 +141,7 @@ main(int argc, char **argv1)
             pooltype = tsk_pool_type_toid(OPTARG);
             if (pooltype == TSK_POOL_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported pool container type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported pool container type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -197,7 +197,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     } else {
@@ -206,7 +206,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -215,7 +215,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -226,13 +226,13 @@ main(int argc, char **argv1)
     else {
         if (fs->fsstat(fs, stdout)) {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/icat.cpp b/tools/fstools/icat.cpp
index b1656dc1d8ab343d514e2c3b5e57de7a7b7c0f85..0e6b1cee3f07998847c3d6b436442c151551fd06 100644
--- a/tools/fstools/icat.cpp
+++ b/tools/fstools/icat.cpp
@@ -34,7 +34,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-hrRsvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image [images] inum[-typ[-id]]\n"),
+        ("usage: %" PRIttocTSK " [-hrRsvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image [images] inum[-typ[-id]]\n"),
         progname);
     tsk_fprintf(stderr, "\t-h: Do not display holes in sparse files\n");
     tsk_fprintf(stderr, "\t-r: Recover deleted file\n");
@@ -106,7 +106,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -114,7 +114,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -127,7 +127,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -141,7 +141,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -163,7 +163,7 @@ main(int argc, char **argv1)
             pooltype = tsk_pool_type_toid(OPTARG);
             if (pooltype == TSK_POOL_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported pool container type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported pool container type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -212,7 +212,7 @@ main(int argc, char **argv1)
     /* Get the inode address */
     if (tsk_fs_parse_inum(argv[argc - 1], &inum, &type, &type_used, &id,
             &id_used)) {
-        TFPRINTF(stderr, _TSK_T("Invalid inode address: %s\n"),
+        TFPRINTF(stderr, _TSK_T("Invalid inode address: %" PRIttocTSK "\n"),
             argv[argc - 1]);
         usage();
     }
@@ -236,7 +236,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     } else {
@@ -245,7 +245,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -254,7 +254,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -263,16 +263,16 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Metadata address too large for image (%" PRIuINUM ")\n",
             fs->last_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
     if (inum < fs->first_inum) {
         tsk_fprintf(stderr,
             "Metadata address too small for image (%" PRIuINUM ")\n",
             fs->first_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -290,12 +290,12 @@ main(int argc, char **argv1)
         }
         else {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
     }
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/ifind.cpp b/tools/fstools/ifind.cpp
index 59040dc0ea37cf012a8d8aad1ddafa9afdd356ef..81c76396f1f3848d11353c5ab6779fcfb359160d 100644
--- a/tools/fstools/ifind.cpp
+++ b/tools/fstools/ifind.cpp
@@ -31,7 +31,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-alvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-d unit_addr] [-n file] [-p par_addr] [-z ZONE] image [images]\n"),
+        ("usage: %" PRIttocTSK " [-alvV] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-d unit_addr] [-n file] [-p par_addr] [-z ZONE] image [images]\n"),
         progname);
     tsk_fprintf(stderr, "\t-a: find all inodes\n");
     tsk_fprintf(stderr,
@@ -115,7 +115,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -129,7 +129,7 @@ main(int argc, char **argv1)
             type = IFIND_DATA;
             block = TSTRTOULL(OPTARG, &cp, 0);
             if (*cp || *cp == *OPTARG) {
-                TFPRINTF(stderr, _TSK_T("Invalid block address: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid block address: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -142,7 +142,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -153,7 +153,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -211,7 +211,7 @@ main(int argc, char **argv1)
             type = IFIND_PARENT;
             if (tsk_fs_parse_inum(OPTARG, &parinode, NULL, NULL, NULL,
                     NULL)) {
-                TFPRINTF(stderr, _TSK_T("Invalid inode address: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid inode address: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -277,7 +277,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -287,7 +287,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -296,7 +296,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -307,15 +307,15 @@ main(int argc, char **argv1)
                 "Block %" PRIuDADDR
                 " is larger than last block in image (%" PRIuDADDR
                 ")\n", block, fs->last_block);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
         if (tsk_fs_ifind_data(fs, (TSK_FS_IFIND_FLAG_ENUM) localflags,
                 block)) {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -323,8 +323,8 @@ main(int argc, char **argv1)
     else if (type == IFIND_PARENT) {
         if (TSK_FS_TYPE_ISNTFS(fs->ftype) == 0) {
             tsk_fprintf(stderr, "-p works only with NTFS file systems\n");
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
         else if (parinode > fs->last_inum) {
@@ -332,15 +332,15 @@ main(int argc, char **argv1)
                 "Meta data %" PRIuINUM
                 " is larger than last MFT entry in image (%" PRIuINUM
                 ")\n", parinode, fs->last_inum);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
         if (tsk_fs_ifind_par(fs, (TSK_FS_IFIND_FLAG_ENUM) localflags,
                 parinode)) {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -351,8 +351,8 @@ main(int argc, char **argv1)
 
         if (-1 == (retval = tsk_fs_ifind_path(fs, path, &inum))) {
             tsk_error_print(stderr);
-            fs->close(fs);
-            img->close(img);
+            tsk_fs_close(fs);
+            tsk_img_close(img);
             free(path);
             exit(1);
         }
@@ -362,8 +362,8 @@ main(int argc, char **argv1)
         else
             tsk_printf("%" PRIuINUM "\n", inum);
     }
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
 
     exit(0);
 }
diff --git a/tools/fstools/ils.cpp b/tools/fstools/ils.cpp
index 22a6087daf993eda6317ccdefd2a3ef62d872711..4277154268cc0b781fecd83d962962918d80a46e 100644
--- a/tools/fstools/ils.cpp
+++ b/tools/fstools/ils.cpp
@@ -34,7 +34,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-emOpvV] [-aAlLzZ] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-s seconds] image [images] [inum[-end]]\n"),
+        ("usage: %" PRIttocTSK " [-emOpvV] [-aAlLzZ] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-s seconds] image [images] [inum[-end]]\n"),
         progname);
     tsk_fprintf(stderr, "\t-e: Display all inodes\n");
     tsk_fprintf(stderr, "\t-m: Display output in the mactime format\n");
@@ -118,7 +118,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -126,7 +126,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -139,7 +139,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -150,7 +150,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -353,7 +353,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -363,7 +363,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -372,7 +372,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -409,12 +409,12 @@ main(int argc, char **argv1)
     if (tsk_fs_ils(fs, (TSK_FS_ILS_FLAG_ENUM) ils_flags, istart, ilast,
             (TSK_FS_META_FLAG_ENUM) flags, sec_skew, image)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/istat.cpp b/tools/fstools/istat.cpp
index 42b7928a858db04d0c978a23223817d2351fce02..5a9de5c97d387e5c85d60966e13ad5c8f0f677e6 100644
--- a/tools/fstools/istat.cpp
+++ b/tools/fstools/istat.cpp
@@ -31,7 +31,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-N num] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-z zone] [-s seconds] [-rvV] image inum\n"),
+        ("usage: %" PRIttocTSK " [-N num] [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-P pooltype] [-B pool_volume_block] [-z zone] [-s seconds] [-rvV] image inum\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-N num: force the display of NUM address of block pointers\n");
@@ -104,7 +104,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('N'):
@@ -112,7 +112,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || numblock < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: block count must be positive: %s\n"),
+                    ("invalid argument: block count must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -122,7 +122,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -135,7 +135,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -149,7 +149,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -168,7 +168,7 @@ main(int argc, char **argv1)
             pooltype = tsk_pool_type_toid(OPTARG);
             if (pooltype == TSK_POOL_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported pool container type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported pool container type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -228,7 +228,7 @@ main(int argc, char **argv1)
      * This will make scripting easier
      */
     if (tsk_fs_parse_inum(argv[argc - 1], &inum, NULL, NULL, NULL, NULL)) {
-        TFPRINTF(stderr, _TSK_T("Invalid inode number: %s"),
+        TFPRINTF(stderr, _TSK_T("Invalid inode number: %" PRIttocTSK),
             argv[argc - 1]);
         usage();
     }
@@ -255,7 +255,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     } else {
@@ -264,7 +264,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_pool_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -273,7 +273,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -282,8 +282,8 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Metadata address is too large for image (%" PRIuINUM ")\n",
             fs->last_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -291,8 +291,8 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Metadata address is too small for image (%" PRIuINUM ")\n",
             fs->first_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -302,12 +302,12 @@ main(int argc, char **argv1)
 
     if (fs->istat(fs, (TSK_FS_ISTAT_FLAG_ENUM) istat_flags, stdout, inum, numblock, sec_skew)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/jcat.cpp b/tools/fstools/jcat.cpp
index 0aed1b8ac2e555c7201aed09f105d8ceea759883..f013f3b7e3eb23aa0dea3b1a1de8bc61aef7ed6a 100644
--- a/tools/fstools/jcat.cpp
+++ b/tools/fstools/jcat.cpp
@@ -24,7 +24,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] image [images] [inode] blk\n"),
+        ("usage: %" PRIttocTSK " [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] image [images] [inode] blk\n"),
         progname);
     tsk_fprintf(stderr, "\tblk: The journal block to view\n");
     tsk_fprintf(stderr,
@@ -78,7 +78,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -86,7 +86,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -99,7 +99,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -110,7 +110,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -138,7 +138,7 @@ main(int argc, char **argv1)
 
     blk = TSTRTOULL(argv[argc - 1], &cp, 0);
     if (*cp || *cp == *argv[argc - 1]) {
-        TFPRINTF(stderr, _TSK_T("bad block number: %s"), argv[argc - 1]);
+        TFPRINTF(stderr, _TSK_T("bad block number: %" PRIttocTSK), argv[argc - 1]);
         exit(1);
     }
 
@@ -162,7 +162,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
         inum = fs->journ_inum;
@@ -179,7 +179,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -188,8 +188,8 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Inode value is too large for image (%" PRIuINUM ")\n",
             fs->last_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -197,16 +197,16 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Inode value is too small for image (%" PRIuINUM ")\n",
             fs->first_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
     if (fs->jopen == NULL) {
         tsk_fprintf(stderr,
             "Journal support does not exist for this file system\n");
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -214,26 +214,26 @@ main(int argc, char **argv1)
     if (-1 == _setmode(_fileno(stdout), _O_BINARY)) {
         fprintf(stderr,
             "jcat: error setting stdout to binary: %s", strerror(errno));
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 #endif
 
     if (fs->jopen(fs, inum)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
     if (fs->jblk_walk(fs, blk, blk, 0, 0, NULL)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/jls.cpp b/tools/fstools/jls.cpp
index 073cbb66c4f9ca197db2f5a7a89b93fcd200d627..2f668efd596162c487d591b693ad3f99ea4324c1 100644
--- a/tools/fstools/jls.cpp
+++ b/tools/fstools/jls.cpp
@@ -20,7 +20,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] image [inode]\n"),
+        ("usage: %" PRIttocTSK " [-f fstype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] image [inode]\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-i imgtype: The format of the image file (use '-i list' for supported types)\n");
@@ -70,7 +70,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -78,7 +78,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -91,7 +91,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -102,7 +102,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -152,7 +152,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -170,7 +170,7 @@ main(int argc, char **argv1)
             tsk_error_print(stderr);
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
                 tsk_fs_type_print(stderr);
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -178,8 +178,8 @@ main(int argc, char **argv1)
     if (fs->jopen == NULL) {
         tsk_fprintf(stderr,
             "Journal support does not exist for this file system\n");
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -187,8 +187,8 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Inode value is too large for image (%" PRIuINUM ")\n",
             fs->last_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -196,25 +196,25 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
             "Inode value is too small for image (%" PRIuINUM ")\n",
             fs->first_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
     if (fs->jopen(fs, inum)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
     if (fs->jentry_walk(fs, 0, 0, NULL)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/fstools/usnjls.cpp b/tools/fstools/usnjls.cpp
index 5d072e52954b6e9b7c05abe8b72a47b68a5e8ba6..f67ef547cb45e8d0d25ea9f7cb7a036210a2a512 100644
--- a/tools/fstools/usnjls.cpp
+++ b/tools/fstools/usnjls.cpp
@@ -26,7 +26,7 @@ usage()
 {
     TFPRINTF(stderr,
              _TSK_T
-             ("usage: %s [-f fstype] [-i imgtype] [-b dev_sector_size]"
+             ("usage: %" PRIttocTSK " [-f fstype] [-i imgtype] [-b dev_sector_size]"
               " [-o imgoffset] [-lmvV] image [inode]\n"),
              progname);
     tsk_fprintf(stderr,
@@ -86,7 +86,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'): {
             default:
-                TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                          argv[OPTIND]);
                 usage();
         }
@@ -95,7 +95,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                          _TSK_T("invalid argument: sector size "
-                                "must be positive: %s\n"),
+                                "must be positive: %" PRIttocTSK "\n"),
                          OPTARG);
                 usage();
             }
@@ -108,7 +108,7 @@ main(int argc, char **argv1)
             fstype = tsk_fs_type_toid(OPTARG);
             if (fstype == TSK_FS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                         _TSK_T("Unsupported file system type: %s\n"), OPTARG);
+                         _TSK_T("Unsupported file system type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -119,7 +119,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                          OPTARG);
                 usage();
             }
@@ -177,7 +177,7 @@ main(int argc, char **argv1)
                 tsk_fs_type_print(stderr);
             }
 
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
 
@@ -214,8 +214,7 @@ main(int argc, char **argv1)
             if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE) {
                 tsk_fs_type_print(stderr);
             }
-
-            img->close(img);
+            tsk_img_close(img);
             exit(1);
         }
     }
@@ -224,8 +223,8 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
                     "Inode value is too large for image (%" PRIuINUM ")\n",
                     fs->last_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -233,19 +232,19 @@ main(int argc, char **argv1)
         tsk_fprintf(stderr,
                     "Inode value is too small for image (%" PRIuINUM ")\n",
                     fs->first_inum);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
     if (tsk_fs_usnjls(fs, inum, flag)) {
         tsk_error_print(stderr);
-        fs->close(fs);
-        img->close(img);
+        tsk_fs_close(fs);
+        tsk_img_close(img);
         exit(1);
     }
 
-    fs->close(fs);
-    img->close(img);
+    tsk_fs_close(fs);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/hashtools/hfind.cpp b/tools/hashtools/hfind.cpp
index edbb61df6939d0a69ce45456b52e52c25a5def69..9e5deaf1e19b978471b59c15f3eb9fd54fc272ef 100644
--- a/tools/hashtools/hfind.cpp
+++ b/tools/hashtools/hfind.cpp
@@ -24,7 +24,7 @@ usage()
 {
     TFPRINTF(stderr,
              _TSK_T
-             ("usage: %s [-eqVa] [-c] [-f lookup_file] [-i db_type] db_file [hashes]\n"),
+             ("usage: %" PRIttocTSK " [-eqVa] [-c] [-f lookup_file] [-i db_type] db_file [hashes]\n"),
              progname);
     tsk_fprintf(stderr,
                 "\t-e: Extended mode - where values other than just the name are printed\n");
@@ -292,7 +292,7 @@ main(int argc, char ** argv1)
             if ((handle = CreateFile(lookup_file, GENERIC_READ,
                                      FILE_SHARE_READ, 0, OPEN_EXISTING, 0,
                                      0)) == INVALID_HANDLE_VALUE) {
-                TFPRINTF(stderr, _TSK_T("Error opening hash file: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Error opening hash file: %" PRIttocTSK "\n"),
                          lookup_file);
                 exit(1);
             }
diff --git a/tools/imgtools/img_cat.cpp b/tools/imgtools/img_cat.cpp
index 7b682ff3474af5d39ffd4c84a52659f67208c063..3d195eb19e3ffec402e8298057f43667ee116e79 100755
--- a/tools/imgtools/img_cat.cpp
+++ b/tools/imgtools/img_cat.cpp
@@ -21,7 +21,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-vV] [-i imgtype] [-b dev_sector_size] [-s start_sector] [-e stop_sector] image\n"),
+        ("usage: %" PRIttocTSK " [-vV] [-i imgtype] [-b dev_sector_size] [-s start_sector] [-e stop_sector] image\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-i imgtype: The format of the image file (use 'i list' for supported types)\n");
@@ -68,7 +68,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -76,7 +76,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -88,7 +88,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -99,7 +99,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || start_sector < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: start sector must be positive: %s\n"),
+                    ("invalid argument: start sector must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -110,7 +110,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || end_sector < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: end sector must be positive: %s\n"),
+                    ("invalid argument: end sector must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/imgtools/img_stat.cpp b/tools/imgtools/img_stat.cpp
index 483505cb92a22045210f0f31848bda0ebc7a51a5..e624226dd78bbcb6f2b01f7c073fc8f7d819bc71 100644
--- a/tools/imgtools/img_stat.cpp
+++ b/tools/imgtools/img_stat.cpp
@@ -17,7 +17,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-tvV] [-i imgtype] [-b dev_sector_size] image\n"),
+        ("usage: %" PRIttocTSK " [-tvV] [-i imgtype] [-b dev_sector_size] image\n"),
         progname);
     tsk_fprintf(stderr, "\t-t: display type only\n");
     tsk_fprintf(stderr,
@@ -59,7 +59,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -67,7 +67,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -79,7 +79,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/logicalimager/DriveUtil.cpp b/tools/logicalimager/DriveUtil.cpp
index 81a999eff40407c5b3ce990acfbd074433f3894d..d936fa1b72b95cbd3e52eb422bb75e76c767a274 100644
--- a/tools/logicalimager/DriveUtil.cpp
+++ b/tools/logicalimager/DriveUtil.cpp
@@ -75,7 +75,7 @@ bool DriveUtil::driveIsFAT(wchar_t *drive) {
             break;
         }
     }
-    img->close(img);
+    tsk_img_close(img);
     TskHelper::getInstance().reset();
     return result;
 }
diff --git a/tools/logicalimager/TskHelper.cpp b/tools/logicalimager/TskHelper.cpp
index a49415d15b8360ea15abdd4a469a6650e9c204d1..4611769340c5eccecfa08f9d8dc11c03bfc8f590 100755
--- a/tools/logicalimager/TskHelper.cpp
+++ b/tools/logicalimager/TskHelper.cpp
@@ -798,7 +798,7 @@ void TskHelper::enumerateFileAndVolumeSystems(TSK_IMG_INFO *img) {
 
 /*
 * Add all FS found in the given image to TskHelp::getInstance()
-* Returns TSK_IMG_INFO *, caller should call img->close(img) when done.
+* Returns TSK_IMG_INFO *, caller should call tsk_img_close(img) when done.
 * The FS can be obtained by calling TskHelper::getInstance().getFSInfoList()
 * Caller must call TskHelper::getInstance().reset() when done with the FS.
 * May exit the program if image failed to open.
diff --git a/tools/logicalimager/tsk_logical_imager.cpp b/tools/logicalimager/tsk_logical_imager.cpp
index 05f8490ec6fa023bbf2b76ff48c3e8325adc5114..728831cf1fcd40f0fb40b726b046882f556940d2 100644
--- a/tools/logicalimager/tsk_logical_imager.cpp
+++ b/tools/logicalimager/tsk_logical_imager.cpp
@@ -398,7 +398,7 @@ static void reportUsers(const std::string &sessionDir, const std::string &driveN
 static void usage() {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-c configPath]\n"),
+        ("usage: %" PRIttocTSK " [-c configPath]\n"),
         progname);
     tsk_fprintf(stderr, "\t-c configPath: The configuration file. Default is logical-imager-config.json\n");
     tsk_fprintf(stderr, "\t-v: verbose output to stderr\n");
@@ -440,7 +440,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND-1]);
             usage();
 
@@ -558,7 +558,7 @@ main(int argc, char **argv1)
 
         if (hasTskLogicalImager()) {
             ReportUtil::consoleOutput(stdout, "Skipping drive %s because tsk_logical_imager.exe exists at the root directory.\n", imageShortName.c_str());
-            img->close(img);
+            tsk_img_close(img);
             TskHelper::getInstance().reset();
             continue; // Don't process a drive with /tsk_logicial_image.exe at the root
         }
@@ -624,7 +624,7 @@ main(int argc, char **argv1)
 
         if (closeImgNow) {
             // close the image, if not creating VHD.
-            img->close(img);
+            tsk_img_close(img);
         }
     }
 
@@ -644,7 +644,7 @@ main(int argc, char **argv1)
                 }
             }
         }
-        img->close(img);
+        tsk_img_close(img);
     }
 
     if (config) {
diff --git a/tools/pooltools/pstat.cpp b/tools/pooltools/pstat.cpp
index fe07ad18109a596f424cf8f482af5ed7738401d1..b6f1ef956a49fa8415d06dcf995b7e3c74b904b6 100644
--- a/tools/pooltools/pstat.cpp
+++ b/tools/pooltools/pstat.cpp
@@ -8,7 +8,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("usage: %s [-tvV] [-p pooltype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image\n"),
+        ("usage: %" PRIttocTSK " [-tvV] [-p pooltype] [-i imgtype] [-b dev_sector_size] [-o imgoffset] image\n"),
         progname);
     tsk_fprintf(stderr, "\t-t: display type only\n");
     tsk_fprintf(stderr,
@@ -57,7 +57,7 @@ main(int argc, char **argv1)
         switch (ch) {
         case _TSK_T('?'):
         default:
-            TFPRINTF(stderr, _TSK_T("Invalid argument: %s\n"),
+            TFPRINTF(stderr, _TSK_T("Invalid argument: %" PRIttocTSK "\n"),
                 argv[OPTIND]);
             usage();
         case _TSK_T('b'):
@@ -65,7 +65,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -78,7 +78,7 @@ main(int argc, char **argv1)
             pooltype = tsk_pool_type_toid(OPTARG);
             if (pooltype == TSK_POOL_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported pool container type: %s\n"), OPTARG);
+                    _TSK_T("Unsupported pool container type: %" PRIttocTSK "\n"), OPTARG);
                 usage();
             }
             break;
@@ -89,7 +89,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -140,7 +140,7 @@ main(int argc, char **argv1)
         tsk_error_print(stderr);
         if (tsk_error_get_errno() == TSK_ERR_FS_UNSUPTYPE)
             tsk_pool_type_print(stderr);
-        img->close(img);
+        tsk_img_close(img);
         exit(1);
     }
 
@@ -150,13 +150,13 @@ main(int argc, char **argv1)
     else {
         if (pool->poolstat(pool, stdout)) {
             tsk_error_print(stderr);
-            pool->close(pool);
-            img->close(img);
+            tsk_pool_close(pool);
+            tsk_img_close(img);
             exit(1);
         }
     }
 
-    pool->close(pool);
-    img->close(img);
+    tsk_pool_close(pool);
+    tsk_img_close(img);
     exit(0);
 }
diff --git a/tools/vstools/mmcat.cpp b/tools/vstools/mmcat.cpp
index 249592461b98880e2c4f520576845b2a12226088..4d16915709bafea93439744721af1b6e008adc60 100644
--- a/tools/vstools/mmcat.cpp
+++ b/tools/vstools/mmcat.cpp
@@ -22,7 +22,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("%s [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] [-t vstype] image [images] part_num\n"),
+        ("usage: %" PRIttocTSK " [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] [-t vstype] image [images] part_num\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-t vstype: The type of partition system (use '-t list' for list of supported types)\n");
@@ -76,7 +76,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -88,7 +88,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -108,7 +108,7 @@ main(int argc, char **argv1)
             vstype = tsk_vs_type_toid(OPTARG);
             if (vstype == TSK_VS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported volume system type: %s\n"),
+                    _TSK_T("Unsupported volume system type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/vstools/mmls.cpp b/tools/vstools/mmls.cpp
index 72a0dea5fc23430ea2de9b16b58115be9120585e..c81b4a4891603c0fb2a2f4cbd1817166a3d7908b 100755
--- a/tools/vstools/mmls.cpp
+++ b/tools/vstools/mmls.cpp
@@ -24,7 +24,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("%s [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-BrvV] [-aAmM] [-t vstype] image [images]\n"),
+        ("usage: %" PRIttocTSK " [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-BrvV] [-aAmM] [-t vstype] image [images]\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-t vstype: The type of volume system (use '-t list' for list of supported types)\n");
@@ -187,7 +187,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -199,7 +199,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -228,7 +228,7 @@ main(int argc, char **argv1)
             vstype = tsk_vs_type_toid(OPTARG);
             if (vstype == TSK_VS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported volume system type: %s\n"),
+                    _TSK_T("Unsupported volume system type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tools/vstools/mmstat.cpp b/tools/vstools/mmstat.cpp
index 60d392bdd1277960ed813a8d4863c9be0ffecb2a..eadce375b08df9b0ffe8adb8fc210124cc2f034b 100644
--- a/tools/vstools/mmstat.cpp
+++ b/tools/vstools/mmstat.cpp
@@ -19,7 +19,7 @@ usage()
 {
     TFPRINTF(stderr,
         _TSK_T
-        ("%s [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] [-t vstype] image [images]\n"),
+        ("usage: %" PRIttocTSK " [-i imgtype] [-b dev_sector_size] [-o imgoffset] [-vV] [-t vstype] image [images]\n"),
         progname);
     tsk_fprintf(stderr,
         "\t-t vstype: The volume system type (use '-t list' for list of supported types)\n");
@@ -74,7 +74,7 @@ main(int argc, char **argv1)
             if (*cp || *cp == *OPTARG || ssize < 1) {
                 TFPRINTF(stderr,
                     _TSK_T
-                    ("invalid argument: sector size must be positive: %s\n"),
+                    ("invalid argument: sector size must be positive: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -86,7 +86,7 @@ main(int argc, char **argv1)
             }
             imgtype = tsk_img_type_toid(OPTARG);
             if (imgtype == TSK_IMG_TYPE_UNSUPP) {
-                TFPRINTF(stderr, _TSK_T("Unsupported image type: %s\n"),
+                TFPRINTF(stderr, _TSK_T("Unsupported image type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
@@ -106,7 +106,7 @@ main(int argc, char **argv1)
             vstype = tsk_vs_type_toid(OPTARG);
             if (vstype == TSK_VS_TYPE_UNSUPP) {
                 TFPRINTF(stderr,
-                    _TSK_T("Unsupported volume system type: %s\n"),
+                    _TSK_T("Unsupported volume system type: %" PRIttocTSK "\n"),
                     OPTARG);
                 usage();
             }
diff --git a/tsk/Makefile.am b/tsk/Makefile.am
index 21d3605dc99dea7cf280646b25f1e1329c534de9..278f064e77c4268a4c0c901bb85e0e03862733d6 100644
--- a/tsk/Makefile.am
+++ b/tsk/Makefile.am
@@ -8,6 +8,24 @@ libtsk_la_LIBADD = base/libtskbase.la img/libtskimg.la \
     vs/libtskvs.la fs/libtskfs.la hashdb/libtskhashdb.la \
     auto/libtskauto.la pool/libtskpool.la util/libtskutil.la
 # current:revision:age
-libtsk_la_LDFLAGS = -version-info 20:6:1 $(LIBTSK_LDFLAGS)
+libtsk_la_LDFLAGS = -version-info 21:1:2 $(LIBTSK_LDFLAGS)
 
-EXTRA_DIST = tsk_tools_i.h docs/Doxyfile docs/*.dox docs/*.html
+EXTRA_DIST = tsk_tools_i.h docs/Doxyfile docs/*.dox docs/*.html \
+   tsk.pc.in
+
+pkgconfigdir = $(libdir)/pkgconfig
+nodist_pkgconfig_DATA = tsk.pc
+
+tsk.pc: tsk.pc.in Makefile
+	sed -e 's![@]prefix[@]!$(prefix)!g' \
+      -e 's![@]exec_prefix[@]!$(exec_prefix)!g' \
+      -e 's![@]includedir[@]!$(includedir)!g' \
+      -e 's![@]libdir[@]!$(libdir)!g' \
+      -e 's![@]PACKAGE_NAME[@]!$(PACKAGE_NAME)!g' \
+      -e 's![@]PACKAGE_VERSION[@]!$(PACKAGE_VERSION)!g' \
+      -e 's![@]AX_PACKAGE_REQUIRES[@]!$(AX_PACKAGE_REQUIRES)!g' \
+      -e 's![@]PACKAGE_LIBS_PRIVATE[@]!$(PACKAGE_LIBS_PRIVATE)!g' \
+      -e 's![@]AX_PACKAGE_REQUIRES_PRIVATE[@]!$(AX_PACKAGE_REQUIRES_PRIVATE)!g' \
+      $< >$@
+
+CLEANFILES = tsk.pc
diff --git a/tsk/auto/auto.cpp b/tsk/auto/auto.cpp
index 8f633247d8159c1357f4e5943a84317080183805..89243df85f7cc293b9947b907d65de4016b3c9ae 100755
--- a/tsk/auto/auto.cpp
+++ b/tsk/auto/auto.cpp
@@ -38,6 +38,7 @@ TskAuto::TskAuto()
 TskAuto::~TskAuto()
 {
     closeImage();
+	m_exteralFsInfoList.clear(); // Don't close the file systems that were passed in
     m_tag = 0;
 }
 
@@ -183,6 +184,17 @@ void
     m_fileFilterFlags = file_flags;
 }
 
+/**
+ * Store a list of pointers to open file systems to use when calling findFilesInImg
+ * instead of opening a new copy.
+ */
+void
+TskAuto::setExternalFileSystemList(const std::list<TSK_FS_INFO *>& fsInfoList)
+{
+	m_exteralFsInfoList.resize(fsInfoList.size());
+	m_exteralFsInfoList.assign(fsInfoList.begin(), fsInfoList.end());
+}
+
 /**
  * @return The size of the image in bytes or -1 if the 
  * image is not open.
@@ -249,6 +261,10 @@ TskAuto::findFilesInImg()
         return 1;
     }
 
+	if (m_img_info->itype == TSK_IMG_TYPE_LOGICAL) {
+		return findFilesInFs(0, TSK_FS_TYPE_LOGICAL);
+	}
+
     return findFilesInVs(0);
 }
 
@@ -323,11 +339,17 @@ TskAuto::findFilesInVs(TSK_OFF_T a_start, TSK_VS_TYPE_ENUM a_vtype)
     // Use mm_walk to get the volumes
     if ((vs_info = tsk_vs_open(m_img_info, a_start, a_vtype)) == NULL) {
 
-        /* If the error code is for encryption, we will register it. Otherwise,
+        /* If the error code is for encryption, we will register it.
+         * If the error code is for multiple volume systems found, register the error
+         * and return without trying to load a file system. Otherwise,
          * ignore this error to avoid confusion if the fs_open passes. */
         if (tsk_error_get_errno() == TSK_ERR_VS_ENCRYPTED) {
             registerError();
         }
+        else if (tsk_error_get_errno() == TSK_ERR_VS_MULTTYPE) {
+            registerError();
+            return 1;
+        }
         tsk_error_reset();
 
         if(tsk_verbose)
@@ -393,7 +415,7 @@ TskAuto::hasPool(TSK_OFF_T a_start)
     if (pool == nullptr) {
         return false;
     }
-    pool->close(pool);
+    tsk_pool_close(pool);
     return true;
 }
 
@@ -453,7 +475,7 @@ TskAuto::findFilesInPool(TSK_OFF_T start, TSK_POOL_TYPE_ENUM ptype)
 
             TSK_FILTER_ENUM filterRetval = filterPoolVol(vol_info);
             if ((filterRetval == TSK_FILTER_STOP) || (m_stopAllProcessing)) {
-                pool->close(pool);
+                tsk_pool_close(pool);
                 return TSK_STOP;
             }
 
@@ -466,8 +488,8 @@ TskAuto::findFilesInPool(TSK_OFF_T start, TSK_POOL_TYPE_ENUM ptype)
                         tsk_fs_close(fs_info);
 
                         if (retval == TSK_STOP) {
-                            pool_img->close(pool_img);
-                            pool->close(pool);
+                            tsk_img_close(pool_img);
+                            tsk_pool_close(pool);
                             return TSK_STOP;
                         }
                     }
@@ -485,16 +507,12 @@ TskAuto::findFilesInPool(TSK_OFF_T start, TSK_POOL_TYPE_ENUM ptype)
                                 "findFilesInPool: Error opening APFS file system");
                             registerError();
                         }
-
-                        pool_img->close(pool_img);
-                        pool->close(pool);
-                        return TSK_ERR;
                     }
 
                     tsk_img_close(pool_img);
                 }
                 else {
-                    pool->close(pool);
+                    tsk_pool_close(pool);
                     tsk_error_set_errstr2(
                         "findFilesInPool: Error opening APFS pool");
                     registerError();
@@ -506,7 +524,7 @@ TskAuto::findFilesInPool(TSK_OFF_T start, TSK_POOL_TYPE_ENUM ptype)
         }
     }
     else {
-        pool->close(pool);
+        tsk_pool_close(pool);
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_POOL_UNSUPTYPE);
         tsk_error_set_errstr("%d", pool->ctype);
@@ -540,6 +558,18 @@ TSK_RETVAL_ENUM
         return TSK_ERR;
     }
 
+	// If we already have an open copy of this file system, use it
+	for (auto itr = m_exteralFsInfoList.begin(); itr != m_exteralFsInfoList.end(); itr++) {
+		if ((*itr)->offset == a_start) {
+			TSK_FS_INFO *fs_info = *itr;
+			TSK_RETVAL_ENUM retval = findFilesInFsInt(fs_info, fs_info->root_inum);
+			if (m_errors.empty() == false)
+				return TSK_ERR;
+			else
+				return retval;
+		}
+	}
+
     TSK_FS_INFO *fs_info;
     if ((fs_info = tsk_fs_open_img(m_img_info, a_start, a_ftype)) == NULL) {
         if (isCurVsValid() == false) {
@@ -627,6 +657,18 @@ uint8_t
         return 1;
     }
 
+	// If we already have an open copy of this file system, use it
+	for (auto itr = m_exteralFsInfoList.begin(); itr != m_exteralFsInfoList.end(); itr++) {
+		if ((*itr)->offset == a_start) {
+			TSK_FS_INFO *fs_info = *itr;
+			TSK_RETVAL_ENUM retval = findFilesInFsInt(fs_info, fs_info->root_inum);
+			if (m_errors.empty() == false)
+				return TSK_ERR;
+			else
+				return retval;
+		}
+	}
+
     TSK_FS_INFO *fs_info;
     if ((fs_info = tsk_fs_open_img(m_img_info, a_start, a_ftype)) == NULL) {
         if (isCurVsValid() == false) {
diff --git a/tsk/auto/auto_db.cpp b/tsk/auto/auto_db.cpp
index 0265c72c04e7b646ec9436f2913b447658167f12..bac4d7f88a3e8917c4de961eba6c431ad7d098c8 100755
--- a/tsk/auto/auto_db.cpp
+++ b/tsk/auto/auto_db.cpp
@@ -209,9 +209,9 @@ TskAutoDb::openImage(const char* a_deviceId)
 uint8_t
 TskAutoDb::addImageDetails(const char* deviceId)
 {
-   string md5 = "";
-   string sha1 = "";
-   string collectionDetails = "";
+   std::string md5 = "";
+   std::string sha1 = "";
+   std::string collectionDetails = "";
 #if HAVE_LIBEWF 
    if (m_img_info->itype == TSK_IMG_TYPE_EWF_EWF) {
      // @@@ This should really probably be inside of a tsk_img_ method
@@ -227,7 +227,7 @@ TskAutoDb::addImageDetails(const char* deviceId)
    }
 #endif
 
-    string devId;
+    std::string devId;
     if (NULL != deviceId) {
         devId = deviceId; 
     } else {
@@ -377,21 +377,15 @@ TskAutoDb::addUnallocatedPoolBlocksToDb(size_t & numPool) {
         /* Create the unallocated space files */
         TSK_FS_ATTR_RUN * unalloc_runs = tsk_pool_unallocated_runs(pool_info);
         TSK_FS_ATTR_RUN * current_run = unalloc_runs;
-        vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
         while (current_run != NULL) {
 
-            TSK_DB_FILE_LAYOUT_RANGE tempRange(current_run->addr * pool_info->block_size, current_run->len * pool_info->block_size, 0);
-
-            ranges.push_back(tempRange);
-            int64_t fileObjId = 0;
-            if (m_db->addUnallocBlockFile(unallocVolObjId, 0, current_run->len * pool_info->block_size, ranges, fileObjId, m_curImgId)) {
+            if (addUnallocBlockFileInChunks(current_run->addr * pool_info->block_size, current_run->len * pool_info->block_size, unallocVolObjId, m_curImgId) == TSK_ERR) {
                 registerError();
                 tsk_fs_attr_run_free(unalloc_runs);
                 return TSK_ERR;
             }
 
             current_run = current_run->next;
-            ranges.clear();
         }
         tsk_fs_attr_run_free(unalloc_runs);
     }
@@ -830,7 +824,7 @@ TskAutoDb::commitAddImage()
  * Set the current image's timezone
  */
 void
-TskAutoDb::setTz(string tzone)
+TskAutoDb::setTz(std::string tzone)
 {
     m_curImgTZone = tzone;
 }
@@ -855,7 +849,7 @@ TskAutoDb::processFile(TSK_FS_FILE * fs_file, const char *path)
     if (isDir(fs_file)) {
         m_curDirAddr = fs_file->name->meta_addr;
         tsk_take_lock(&m_curDirPathLock);
-        m_curDirPath = string(path) + fs_file->name->name;
+        m_curDirPath = std::string(path) + fs_file->name->name;
         tsk_release_lock(&m_curDirPathLock);
     }
     else if (m_curDirAddr != fs_file->name->par_addr) {
@@ -1325,14 +1319,10 @@ TSK_RETVAL_ENUM TskAutoDb::addUnallocVsSpaceToDb(size_t & numVsP) {
             return TSK_ERR;
         }
 
-        //create an unalloc file with unalloc part, with vs part as parent
-        vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
+        //create an unalloc file (or files) with unalloc part, with vs part as parent
         const uint64_t byteStart = vsInfo.offset + vsInfo.block_size * vsPart.start;
         const uint64_t byteLen = vsInfo.block_size * vsPart.len; 
-        TSK_DB_FILE_LAYOUT_RANGE tempRange(byteStart, byteLen, 0);
-        ranges.push_back(tempRange);
-        int64_t fileObjId = 0;
-        if (m_db->addUnallocBlockFile(vsPart.objId, 0, tempRange.byteLen, ranges, fileObjId, m_curImgId) == TSK_ERR) {
+        if (addUnallocBlockFileInChunks(byteStart, byteLen, vsPart.objId, m_curImgId) == TSK_ERR) {
             registerError();
             return TSK_ERR;
         }
@@ -1357,14 +1347,56 @@ TSK_RETVAL_ENUM TskAutoDb::addUnallocImageSpaceToDb() {
         retImgFile = TSK_ERR;
     }
     else {
-        TSK_DB_FILE_LAYOUT_RANGE tempRange(0, imgSize, 0);
-        //add unalloc block file for the entire image
+        retImgFile = addUnallocBlockFileInChunks(0, imgSize, m_curImgId, m_curImgId);
+    }
+    return retImgFile;
+}
+
+/**
+* Adds unallocated block files covering [byteStart, byteStart + totalSize) to the database,
+* splitting the range into multiple files of at most m_maxChunkSize bytes when chunking is enabled.
+* @returns TSK_OK on success, TSK_ERR on error
+*/
+TSK_RETVAL_ENUM TskAutoDb::addUnallocBlockFileInChunks(uint64_t byteStart, TSK_OFF_T totalSize, int64_t parentObjId, int64_t dataSourceObjId) {
+
+    if (m_maxChunkSize <= 0) {
+        // No chunking - write the entire file
+        TSK_DB_FILE_LAYOUT_RANGE tempRange(byteStart, totalSize, 0);
         vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
         ranges.push_back(tempRange);
         int64_t fileObjId = 0;
-        retImgFile = m_db->addUnallocBlockFile(m_curImgId, 0, imgSize, ranges, fileObjId, m_curImgId);
+        return m_db->addUnallocBlockFile(parentObjId, 0, totalSize, ranges, fileObjId, dataSourceObjId);
     }
-    return retImgFile;
+
+    // We will chunk into separate files with max size m_maxChunkSize
+    uint64_t maxChunkSize = (uint64_t)m_maxChunkSize;
+    uint64_t bytesLeft = (uint64_t)totalSize;
+    uint64_t startingOffset = byteStart;
+    uint64_t chunkSize;
+    vector<TSK_DB_FILE_LAYOUT_RANGE> ranges;
+    while (bytesLeft > 0) {
+
+        if (maxChunkSize > bytesLeft) {
+            chunkSize = bytesLeft;
+            bytesLeft = 0;
+        }
+        else {
+            chunkSize = maxChunkSize;
+            bytesLeft -= maxChunkSize;
+        }
+
+        TSK_DB_FILE_LAYOUT_RANGE tempRange(startingOffset, chunkSize, 0);     
+        ranges.push_back(tempRange);
+        int64_t fileObjId = 0;
+
+        TSK_RETVAL_ENUM retval = m_db->addUnallocBlockFile(parentObjId, 0, chunkSize, ranges, fileObjId, dataSourceObjId);
+        if (retval != TSK_OK) {
+            return retval;
+        }
+        ranges.clear();
+        startingOffset += chunkSize;
+    }
+    return TSK_OK;
 }
 
 /**
@@ -1374,7 +1406,7 @@ TSK_RETVAL_ENUM TskAutoDb::addUnallocImageSpaceToDb() {
 * @returns curDirPath string representing currently analyzed directory
 */
 const std::string TskAutoDb::getCurDir() {
-    string curDirPath;
+    std::string curDirPath;
     tsk_take_lock(&m_curDirPathLock);
     curDirPath = m_curDirPath;
     tsk_release_lock(&m_curDirPathLock);
diff --git a/tsk/auto/db_sqlite.cpp b/tsk/auto/db_sqlite.cpp
index 4a402ee37f4f51899645b8786ff6f74f44d23b74..6c2c58d94106cad7b3179809f245b08dda814723 100644
--- a/tsk/auto/db_sqlite.cpp
+++ b/tsk/auto/db_sqlite.cpp
@@ -701,7 +701,7 @@ int TskDbSqlite::addImageInfo(int type, TSK_OFF_T ssize, int64_t & objId, const
     {
         // Use a GUID as the default.
         GuidGenerator generator;
-        Guid guid = generator.newGuid();
+        TSKGuid guid = generator.newGuid();
         deviceIdStr << guid;
     }
     else
@@ -711,7 +711,7 @@ int TskDbSqlite::addImageInfo(int type, TSK_OFF_T ssize, int64_t & objId, const
 #else
     deviceIdStr << deviceId;
 #endif
-    sql = sqlite3_mprintf("INSERT INTO data_source_info (obj_id, device_id, time_zone, acquisition_details) VALUES (%lld, '%s', '%s', '%q');", objId, deviceIdStr.str().c_str(), timezone.c_str(), collectionDetails.c_str());
+    sql = sqlite3_mprintf("INSERT INTO data_source_info (obj_id, device_id, time_zone, acquisition_details) VALUES (%lld, '%q', '%q', '%q');", objId, deviceIdStr.str().c_str(), timezone.c_str(), collectionDetails.c_str());
     ret = attempt_exec(sql, "Error adding data to tsk_image_info table: %s\n");
     sqlite3_free(sql);
     return ret;
diff --git a/tsk/auto/guid.cpp b/tsk/auto/guid.cpp
index efdc98a2e5a31ada378085737ac72fde17bc1747..015233793f887872f222a682e56b6eaf47cf1dbc 100755
--- a/tsk/auto/guid.cpp
+++ b/tsk/auto/guid.cpp
@@ -43,7 +43,7 @@ THE SOFTWARE.
 using namespace std;
 
 // overload << so that it's easy to convert to a string
-ostream &operator<<(ostream &s, const Guid &guid)
+ostream &operator<<(ostream &s, const TSKGuid &guid)
 {
   return s << hex << setfill('0')
     << setw(2) << (int)guid._bytes[0]
@@ -69,13 +69,13 @@ ostream &operator<<(ostream &s, const Guid &guid)
 }
 
 // create a guid from vector of bytes
-Guid::Guid(const vector<unsigned char> &bytes)
+TSKGuid::TSKGuid(const vector<unsigned char> &bytes)
 {
   _bytes = bytes;
 }
 
 // create a guid from array of bytes
-Guid::Guid(const unsigned char *bytes)
+TSKGuid::TSKGuid(const unsigned char *bytes)
 {
   _bytes.assign(bytes, bytes + 16);
 }
@@ -102,7 +102,7 @@ unsigned char hexPairToChar(char a, char b)
 }
 
 // create a guid from string
-Guid::Guid(const string &fromString)
+TSKGuid::TSKGuid(const string &fromString)
 {
   _bytes.clear();
 
@@ -133,18 +133,18 @@ Guid::Guid(const string &fromString)
 }
 
 // create empty guid
-Guid::Guid()
+TSKGuid::TSKGuid()
 {
   _bytes = vector<unsigned char>(16, 0);
 }
 
 // copy constructor
-Guid::Guid(const Guid &other)
+TSKGuid::TSKGuid(const TSKGuid &other)
 {
   _bytes = other._bytes;
 }
 
-std::string Guid::str() const {
+std::string TSKGuid::str() const {
   std::stringstream ss;
   ss << (*this);
 
@@ -152,20 +152,20 @@ std::string Guid::str() const {
 }
 
 // overload assignment operator
-Guid &Guid::operator=(const Guid &other)
+TSKGuid &TSKGuid::operator=(const TSKGuid &other)
 {
   _bytes = other._bytes;
   return *this;
 }
 
 // overload equality operator
-bool Guid::operator==(const Guid &other) const
+bool TSKGuid::operator==(const TSKGuid &other) const
 {
   return _bytes == other._bytes;
 }
 
 // overload inequality operator
-bool Guid::operator!=(const Guid &other) const
+bool TSKGuid::operator!=(const TSKGuid &other) const
 {
   return !((*this) == other);
 }
@@ -173,7 +173,7 @@ bool Guid::operator!=(const Guid &other) const
 // This is the linux friendly implementation, but it could work on other
 // systems that have libuuid available
 #ifdef GUID_LIBUUID
-Guid GuidGenerator::newGuid()
+TSKGuid GuidGenerator::newGuid()
 {
   uuid_t id;
   uuid_generate(id);
@@ -183,7 +183,7 @@ Guid GuidGenerator::newGuid()
 
 // this is the mac and ios version
 #ifdef GUID_CFUUID
-Guid GuidGenerator::newGuid()
+TSKGuid GuidGenerator::newGuid()
 {
   CFUUIDRef newId = CFUUIDCreate(NULL);
   CFUUIDBytes bytes = CFUUIDGetUUIDBytes(newId);
@@ -214,7 +214,7 @@ Guid GuidGenerator::newGuid()
 
 // obviously this is the windows version
 #ifdef GUID_WINDOWS
-Guid GuidGenerator::newGuid()
+TSKGuid GuidGenerator::newGuid()
 {
   GUID newId;
   CoCreateGuid(&newId);
@@ -257,7 +257,7 @@ GuidGenerator::GuidGenerator(JNIEnv *env)
   _leastSignificantBitsMethod = env->GetMethodID(_uuidClass, "getLeastSignificantBits", "()J");
 }
 
-Guid GuidGenerator::newGuid()
+TSKGuid GuidGenerator::newGuid()
 {
   jobject javaUuid = _env->CallStaticObjectMethod(_uuidClass, _newGuidMethod);
   jlong mostSignificant = _env->CallLongMethod(javaUuid, _mostSignificantBitsMethod);
diff --git a/tsk/auto/guid.h b/tsk/auto/guid.h
index fd0780d1b2233a7df8bcc85c6398b9c7ceaed4bd..642a44efa9d1539d172999d0bcd823cab42461d9 100755
--- a/tsk/auto/guid.h
+++ b/tsk/auto/guid.h
@@ -38,34 +38,34 @@ THE SOFTWARE.
 // 16 byte value that can be passed around by value. It also supports
 // conversion to string (via the stream operator <<) and conversion from a
 // string via constructor.
-class Guid
+class TSKGuid
 {
   public:
 
     // create a guid from vector of bytes
-    Guid(const std::vector<unsigned char> &bytes);
+    TSKGuid(const std::vector<unsigned char> &bytes);
 
     // create a guid from array of bytes
-    Guid(const unsigned char *bytes);
+    TSKGuid(const unsigned char *bytes);
 
     // create a guid from string
-    Guid(const std::string &fromString);
+    TSKGuid(const std::string &fromString);
 
     // create empty guid
-    Guid();
+    TSKGuid();
 
-    Guid(Guid &&) = default;
+    TSKGuid(TSKGuid &&) = default;
 
     // copy constructor
-    Guid(const Guid &other);
+    TSKGuid(const TSKGuid &other);
 
     // overload assignment operator
-    Guid &operator=(const Guid &other);
-    Guid &operator=(Guid &&) = default;
+    TSKGuid &operator=(const TSKGuid &other);
+    TSKGuid &operator=(TSKGuid &&) = default;
 
     // overload equality and inequality operator
-    bool operator==(const Guid &other) const;
-    bool operator!=(const Guid &other) const;
+    bool operator==(const TSKGuid &other) const;
+    bool operator!=(const TSKGuid &other) const;
 
     std::string str() const;
 
@@ -79,7 +79,7 @@ class Guid
     std::vector<unsigned char> _bytes;
 
     // make the << operator a friend so it can access _bytes
-    friend std::ostream &operator<<(std::ostream &s, const Guid &guid);
+    friend std::ostream &operator<<(std::ostream &s, const TSKGuid &guid);
 };
 
 // Class that can create new guids. The only reason this exists instead of
@@ -98,7 +98,7 @@ class GuidGenerator
     GuidGenerator() { }
 #endif
 
-    Guid newGuid();
+    TSKGuid newGuid();
 
 #ifdef GUID_ANDROID
   private:
diff --git a/tsk/auto/is_image_supported.cpp b/tsk/auto/is_image_supported.cpp
index a5aced7ebc18a94ec918227be28c21c85d1007b9..14f6c8e205704adc46c553dfba6f629373801cdb 100644
--- a/tsk/auto/is_image_supported.cpp
+++ b/tsk/auto/is_image_supported.cpp
@@ -113,6 +113,19 @@ uint8_t TskIsImageSupported::handleError()
             strncpy(m_unsupportedDesc, lastError->errstr, 1024);
             m_wasUnsupported = true;
         }
+        else if (errCode == TSK_ERR_VS_MULTTYPE) {
+            // errstr only contains the "MAC or DOS" part, so add more context
+            strncpy(m_unsupportedDesc, "Multiple volume system types found - ", 1024);
+            strncat(m_unsupportedDesc, lastError->errstr, 950);
+            m_wasUnsupported = true;
+        }
+        else if (errCode == TSK_ERR_FS_MULTTYPE) {
+            // errstr only contains the "UFS or NTFS" part, so add more context
+            strncpy(m_unsupportedDesc, "Multiple file system types found - ", 1024);
+            strncat(m_unsupportedDesc, lastError->errstr, 950);
+            m_wasUnsupported = true;
+        }
+
     }
     return 0;
 }
diff --git a/tsk/auto/tsk_auto.h b/tsk/auto/tsk_auto.h
index 028a95d82b7a37ced4054ffb3c2ec9bdbd4afe50..58ef0ce368a57cb4b3eebaef81201eae6e1dceef 100644
--- a/tsk/auto/tsk_auto.h
+++ b/tsk/auto/tsk_auto.h
@@ -34,7 +34,7 @@
 
 #include <string>
 #include <vector>
-
+#include <list>
 
 #define TSK_AUTO_TAG 0x9191ABAB
 
@@ -100,6 +100,7 @@ class TskAuto {
 
     void setFileFilterFlags(TSK_FS_DIR_WALK_FLAG_ENUM);
     void setVolFilterFlags(TSK_VS_PART_FLAG_ENUM);
+	void setExternalFileSystemList(const std::list<TSK_FS_INFO *>& externalFsInfoList);
 
     /**
      * TskAuto calls this method before it processes the volume system that is found in an 
@@ -264,6 +265,7 @@ class TskAuto {
   protected:
     TSK_IMG_INFO * m_img_info;
     std::vector<const TSK_POOL_INFO*> m_poolInfos;
+	std::list<TSK_FS_INFO *> m_exteralFsInfoList; // Stores TSK_FS_INFO structures that were opened outside of TskAuto and passed in
 
     bool m_internalOpen;        ///< True if m_img_info was opened in TskAuto and false if passed in
     bool m_stopAllProcessing;   ///< True if no further processing should occur
diff --git a/tsk/auto/tsk_case_db.h b/tsk/auto/tsk_case_db.h
index fb6bc765644125dd5445e1a437782c7c9b3374de..18d98fc46f01a7a1281f69b26f6dc76e939933ab 100644
--- a/tsk/auto/tsk_case_db.h
+++ b/tsk/auto/tsk_case_db.h
@@ -40,7 +40,7 @@ class TskAutoDb:public TskAuto {
     virtual uint8_t openImageUtf8(int, const char *const images[],
         TSK_IMG_TYPE_ENUM, unsigned int a_ssize, const char* deviceId = NULL);
     virtual void closeImage();
-    virtual void setTz(string tzone);
+    virtual void setTz(std::string tzone);
 
     virtual TSK_FILTER_ENUM filterVs(const TSK_VS_INFO * vs_info);
     virtual TSK_FILTER_ENUM filterVol(const TSK_VS_PART_INFO * vs_part);
@@ -133,9 +133,9 @@ class TskAutoDb:public TskAuto {
     int64_t m_curFileId;    ///< Object ID of file currently being processed
     TSK_INUM_T m_curDirAddr;		///< Meta address the directory currently being processed
     int64_t m_curUnallocDirId;	
-    string m_curDirPath;		//< Path of the current directory being processed
+    std::string m_curDirPath;		//< Path of the current directory being processed
     tsk_lock_t m_curDirPathLock; //< protects concurrent access to m_curDirPath
-    string m_curImgTZone;
+    std::string m_curImgTZone;
     bool m_blkMapFlag;
     bool m_fileHashFlag;
     bool m_vsFound;
@@ -198,6 +198,7 @@ class TskAutoDb:public TskAuto {
     TSK_RETVAL_ENUM addUnallocVsSpaceToDb(size_t & numVsP);
     TSK_RETVAL_ENUM addUnallocImageSpaceToDb();
     TSK_RETVAL_ENUM addUnallocSpaceToDb();
+    TSK_RETVAL_ENUM addUnallocBlockFileInChunks(uint64_t byteStart, TSK_OFF_T totalSize, int64_t parentObjId, int64_t dataSourceObjId);
 
 };
 
diff --git a/tsk/base/mymalloc.c b/tsk/base/mymalloc.c
index efbf0aadcbd466518729625a4df7589e81d4c7eb..5bc128cffeeec6039b0f0d81527605824388ae06 100644
--- a/tsk/base/mymalloc.c
+++ b/tsk/base/mymalloc.c
@@ -29,14 +29,12 @@ tsk_malloc(size_t len)
 {
     void *ptr;
 
-    if ((ptr = malloc(len)) == 0) {
+    if ((ptr = calloc(len, 1)) == 0) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_AUX_MALLOC);
         tsk_error_set_errstr("tsk_malloc: %s (%" PRIuSIZE" requested)", strerror(errno), len);
     }
-    else {
-        memset(ptr, 0, len);
-    }
+
     return ptr;
 }
 
diff --git a/tsk/base/tsk_base.h b/tsk/base/tsk_base.h
index 9c61526894d00d0a0b47816829be143b6a59de51..bc14675d6336c22034de465f48c1062a41d10cf8 100644
--- a/tsk/base/tsk_base.h
+++ b/tsk/base/tsk_base.h
@@ -39,11 +39,11 @@
  * 3.1.2b1 would be 0x03010201.  Snapshot from Jan 2, 2003 would be
  * 0xFF030102.
  * See TSK_VERSION_STR for string form. */
-#define TSK_VERSION_NUM 0x041100ff
+#define TSK_VERSION_NUM 0x041201ff
 
 /** Version of code in string form. See TSK_VERSION_NUM for
  * integer form. */
-#define TSK_VERSION_STR "4.11.0"
+#define TSK_VERSION_STR "4.12.1"
 
 
 /* include the TSK-specific header file that we created in autoconf
@@ -333,7 +333,8 @@ extern "C" {
 #define TSK_ERR_VS_BLK_NUM	(TSK_ERR_VS | 6)
 #define TSK_ERR_VS_ARG	    (TSK_ERR_VS | 7)
 #define TSK_ERR_VS_ENCRYPTED    (TSK_ERR_VS | 8)
-#define TSK_ERR_VS_MAX		9
+#define TSK_ERR_VS_MULTTYPE     (TSK_ERR_VS | 9)
+#define TSK_ERR_VS_MAX		10
 
 #define TSK_ERR_POOL_UNKTYPE    (TSK_ERR_POOL | 0)
 #define TSK_ERR_POOL_UNSUPTYPE  (TSK_ERR_IMG | 1)
@@ -361,7 +362,8 @@ extern "C" {
 #define TSK_ERR_FS_ATTR_NOTFOUND (TSK_ERR_FS | 17)
 #define TSK_ERR_FS_ENCRYPTED    (TSK_ERR_FS | 18)
 #define TSK_ERR_FS_POSSIBLY_ENCRYPTED    (TSK_ERR_FS | 19)
-#define TSK_ERR_FS_MAX		20
+#define TSK_ERR_FS_MULTTYPE    (TSK_ERR_FS | 20)
+#define TSK_ERR_FS_MAX		21
 
 #define TSK_ERR_HDB_UNKTYPE     (TSK_ERR_HDB | 0)
 #define TSK_ERR_HDB_UNSUPTYPE   (TSK_ERR_HDB | 1)
diff --git a/tsk/base/tsk_error.c b/tsk/base/tsk_error.c
index cda9c369397ce2bb639415eab431cbb6a15e6fcc..0e02cc48fbc5c8de9f79560eaf481621b2bb335f 100644
--- a/tsk/base/tsk_error.c
+++ b/tsk/base/tsk_error.c
@@ -56,6 +56,7 @@ static const char *tsk_err_mm_str[TSK_ERR_VS_MAX] = {
     "Invalid sector address",
     "Invalid API argument",
     "Encryption detected",
+    "Multiple volume system types detected",
 };
 
 static const char *tsk_err_fs_str[TSK_ERR_FS_MAX] = {
@@ -79,6 +80,7 @@ static const char *tsk_err_fs_str[TSK_ERR_FS_MAX] = {
     "Attribute not found in file",
     "Encryption detected",
     "Possible encryption detected",
+    "Multiple file system types detected",   // 20
 };
 
 static const char *tsk_err_hdb_str[TSK_ERR_HDB_MAX] = {
diff --git a/tsk/base/tsk_os.h b/tsk/base/tsk_os.h
index 4e48f0a821d3a7038017e515295b5d3bf6f860f2..eb2f8270b37eb674640e7f8836d29f5b9bd744ea 100755
--- a/tsk/base/tsk_os.h
+++ b/tsk/base/tsk_os.h
@@ -160,9 +160,8 @@ typedef WCHAR TSK_TCHAR;        ///< Character data type that is UTF-16 (wchar_t
 #endif
 
 
-#define PRIcTSK _TSK_T("S")     ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer
-#define PRIwTSK _TSK_T("s")     ///< sprintf macro to print a UTF-16 wchar_t string to TSK_TCHAR buffer
-#define PRIttocTSK  "S"         ///< printf macro to print a TSK_TCHAR string to stderr or other char device
+#define PRIcTSK _TSK_T("hs")     ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer
+#define PRIttocTSK  "ls"         ///< printf macro to print a TSK_TCHAR string to stderr or other char device
 #define PRIuSIZE "Iu"           ///< printf macro to print a size_t value in Windows printf codes
 
 #define unlink _unlink
@@ -201,8 +200,7 @@ typedef char TSK_TCHAR;         ///< Character data type that is UTF-16 (wchar_t
 #define TZSET	tzset
 #define TZNAME	tzname
 
-#define PRIcTSK _TSK_T("s")     ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer
-#define PRIwTSK _TSK_T("S")     ///< sprintf macro to print a UTF-16 wchar_t string to TSK_TCHAR buffer
+#define PRIcTSK _TSK_T("hs")     ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer
 #define PRIttocTSK  "s"         ///< printf macro to print a TSK_TCHAR string to stderr or other char device
 #define PRIuSIZE "zu"           ///< printf macro to print a size_t value in non-Windows printf codes
 
diff --git a/tsk/base/tsk_printf.c b/tsk/base/tsk_printf.c
index b192cb725c0fde3960fb9eaae9f7fda9b9197558..94b92bfd999c91040121839f6c159b4de33555bd 100644
--- a/tsk/base/tsk_printf.c
+++ b/tsk/base/tsk_printf.c
@@ -92,7 +92,7 @@ tsk_fprintf(FILE * fd, const char *msg, ...)
     {
         WCHAR wbuf[2048];
         tsk_printf_conv(wbuf, 2048, msg, &args);
-        fwprintf(fd, _TSK_T("%s"), wbuf);
+        fwprintf(fd, _TSK_T("%ls"), wbuf);
     }
 #else
     vfprintf(fd, msg, args);
@@ -119,7 +119,7 @@ tsk_printf(const char *msg, ...)
     {
         WCHAR wbuf[2048];
         tsk_printf_conv(wbuf, 2048, msg, &args);
-        wprintf(_TSK_T("%s"), wbuf);
+        wprintf(_TSK_T("%ls"), wbuf);
     }
 #else
     vprintf(msg, args);
diff --git a/tsk/base/tsk_unicode.c b/tsk/base/tsk_unicode.c
index da34c74aef578e4a06691393f6975d8e9fb03477..35a74939238fcd508fbbf9f21076f55ca198a941 100644
--- a/tsk/base/tsk_unicode.c
+++ b/tsk/base/tsk_unicode.c
@@ -43,6 +43,7 @@
  */
 
 #include "tsk_base_i.h"
+#include <wchar.h>
 
 /* Some fundamental constants */
 typedef unsigned long UTF32;    /* at least 32 bits */
@@ -150,12 +151,15 @@ tsk_UTF16toUTF8(TSK_ENDIAN_ENUM endian, const UTF16 ** sourceStart,
     TSKConversionResult result = TSKconversionOK;
     const UTF16 *source = *sourceStart;
     UTF8 *target = *targetStart;
+
     while (source < sourceEnd) {
         UTF32 ch;
         unsigned short bytesToWrite = 0;
         const UTF32 byteMask = 0xBF;
         const UTF32 byteMark = 0x80;
         const UTF16 *oldSource = source;        /* In case we have to back up because of target overflow. */
+
+        // Need at least 2 bytes
         ch = tsk_getu16(endian, (uint8_t *) source);
         source++;
 
@@ -163,6 +167,7 @@ tsk_UTF16toUTF8(TSK_ENDIAN_ENUM endian, const UTF16 ** sourceStart,
         if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
             /* If the 16 bits following the high surrogate are in the source buffer... */
             if (source < sourceEnd) {
+                // Need at least 2 bytes
                 UTF32 ch2 = tsk_getu16(endian, (uint8_t *) source);
                 ++source;
 
@@ -566,6 +571,50 @@ tsk_cleanupUTF8(char *source, const char replacement)
     }
 }
 
+
+/**
+ * Cleans up the passed in string to replace invalid
+ * UTF-16 values with the passed in character.
+ * @param endian Ordering that data is stored in
+ * @param source String to be cleaned up
+ * @param source_len Number of wchar_t characters in source
+ * @param replacement Character to insert into source as needed.
+ */
+void
+tsk_cleanupUTF16(TSK_ENDIAN_ENUM endian, wchar_t *source, size_t source_len, const wchar_t replacement) {
+
+    size_t cur_idx = 0;
+    while (cur_idx < source_len) {
+        UTF32 ch = tsk_getu16(endian, (uint8_t *) &source[cur_idx]);
+
+        /* If we have a surrogate pair, check out the high part. */
+        if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+            /* If the 16 bits following the high surrogate are in the source buffer... */
+            if (cur_idx + 1 < source_len) {
+                UTF32 ch2 = tsk_getu16(endian, (uint8_t *) &source[cur_idx+1]);
+                
+                /* If it's a low surrogate, we're good. */
+                if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+                    // all good, use both
+                    cur_idx++;
+                }
+                else {
+                    source[cur_idx] = replacement;
+                }
+            }
+            else {   /* We don't have the 16 bits following the high surrogate. */
+                source[cur_idx] = replacement;
+            }
+        }
+        /* UTF-16 surrogate values are illegal in UTF-32 */
+        else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+            source[cur_idx] = replacement;
+        }
+        cur_idx++;
+    }
+}
+
+
 /* --------------------------------------------------------------------- */
 
 
diff --git a/tsk/base/tsk_unicode.h b/tsk/base/tsk_unicode.h
index 65360d9ab678f80f1631f234951f48bac2464f2b..45a6352a7ab2d1e41652383b1fca755fe9194c52 100644
--- a/tsk/base/tsk_unicode.h
+++ b/tsk/base/tsk_unicode.h
@@ -153,7 +153,10 @@ extern "C" {
         const UTF8 * sourceEnd);
 
     extern void
-     tsk_cleanupUTF8(char *source, const char replacement);
+        tsk_cleanupUTF8(char *source, const char replacement);
+
+    extern void
+        tsk_cleanupUTF16(TSK_ENDIAN_ENUM endian, wchar_t *source, size_t source_len, const wchar_t replacement);
 #endif
 //@}
 
diff --git a/tsk/docs/Doxyfile b/tsk/docs/Doxyfile
index d0f18c89ee26546ab484c3b59396ba1c0c52db34..ca6854ba0bbb0538581db14de1fee102033a519d 100644
--- a/tsk/docs/Doxyfile
+++ b/tsk/docs/Doxyfile
@@ -33,7 +33,7 @@ PROJECT_NAME           = "The Sleuth Kit"
 # if some version control system is used.
 
 # This is automatically updated  at release time. 
-PROJECT_NUMBER = 4.11.0
+PROJECT_NUMBER = 4.12.1
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer
@@ -662,6 +662,7 @@ INPUT                  = tsk/docs/main.dox \
                          tsk/docs/img.dox \
                          tsk/docs/vs.dox \
                          tsk/docs/fs.dox \
+                         tsk/docs/pool.dox \
                          tsk/docs/hashdb.dox \
                          tsk/docs/auto.dox \
                          tsk/docs/cpp.dox \
@@ -883,7 +884,7 @@ GENERATE_HTML          = YES
 # put in front of it. If left blank `html' will be used as the default path.
 
 # NOTE: This is automatically updated at release time. 
-HTML_OUTPUT = api-docs/4.11.0/
+HTML_OUTPUT = api-docs/4.12.1/
 
 # The HTML_FILE_EXTENSION tag can be used to specify the file extension for
 # each generated HTML page (for example: .htm,.php,.asp). If it is left blank
diff --git a/tsk/docs/basics.dox b/tsk/docs/basics.dox
index 1275145459ad832ca7af164abc6f57a931685f6d..d37c3460726c3e35ba53bdd713bf0af512feb791 100644
--- a/tsk/docs/basics.dox
+++ b/tsk/docs/basics.dox
@@ -10,7 +10,11 @@ The lowest layer that TSK has is the <b>Base Layer</b>, which contains common pr
 
 The next layer up is the <b>Disk Image Layer</b>, which allows disk images in various formats to be opened and processed.  This layer hides the details associated with split, compressed, and encrypted image files from the other layers. All disk images must be first opened by the Disk Image Layer functions before they can be processed by other layers. 
 
-The next layer up is the <b>Volume System Layer</b>. This layer focuses on processing data as a volume system, such as DOS partition tables or BSD disk label structures.  If the disk image being analyzed has a volume system on it, then this set of functions will tell you starting and ending location of its partitions.  
+The next layer up is the <b>Volume System Layer</b>. This layer focuses on processing data as a volume system, such as DOS partition tables or BSD disk label structures.  If the disk image being analyzed has a volume system on it, then this set of functions will tell you starting and ending location of its partitions. These volumes/partitions have a consecutive set of sectors.  
+
+The next layer is the <b>Pool Layer</b>. This layer focuses on managing pools of blocks that can be organized into different volumes. This allows a volume to have a set of non-consecutive blocks.  Pools can exist across an entire disk (i.e. there is no volume system) or within volumes/partitions. A pool itself will have 'pool volumes' that can contain file systems, etc. 
+
+NOTE: Most disk images do not have a pool layer. It was added to TSK for APFS support. 
 
 The next layer up is the <b>File System Layer</b>.  This layer focuses on processing data as a file system, such as FAT or NTFS. File systems can be located in a partition or can be the full disk image file.  These set of functions allow you to read arbitrary data from the file system, list files, and open files.  There are several sub-layers in the File System Layer and they are described in \ref fs_layers.  
 
@@ -29,19 +33,22 @@ A basic diagram of the relationship between these layers is shown here. Note tha
  |                       +================+                 |
  |                         /            \                   |
  |                        /              \                  |
- |              +==============+      +==================+  |
- |              |  Disk Image  |      |   Hash Database  |  |
- |              +==============+      +==================+  |
- |                 /       |                                |
- |                /        |                                |
- |   +===============+     |                                |
- |   | Volume System |     |                                |
- |   +===============+     |                                |
- |               \         |                                |
- |                \        |                                |
- |               +===============+                          |
- |               |  File System  |                          |
- |               +===============+                          |
+ |  +==========================+      +==================+  |
+ |  |        Disk Image        |      |   Hash Database  |  |
+ |  +==========================+      +==================+  |
+ |   |          |          |                                |
+ |   |          |          |                                |
+ |   |  +===============+  |                                |
+ |   |  | Volume System |  |                                |
+ |   |  +===============+  |                                |
+ |   |     |       |       |                                |
+ |  +==========+   |       |                                |
+ |  |   Pool   |   |       |                                |
+ |  +==========+   |       |                                |
+ |           |     |       |                                |
+ |        +========================+                        |
+ |        |      File System       |                        |
+ |        +========================+                        |
  |                                                          |
  |                                                          |
  |                        Automation                        |
diff --git a/tsk/docs/main.dox b/tsk/docs/main.dox
index 6055e105c2fc6a45b4178b8f6703bd5b461ba348..7a658a6970ce0b18948f11359e118135353b6b49 100644
--- a/tsk/docs/main.dox
+++ b/tsk/docs/main.dox
@@ -14,6 +14,7 @@ The User's Guide describes the various components of TSK and how to use them.  I
   - \subpage basepage 
   - \subpage imgpage 
   - \subpage vspage 
+  - \subpage poolpage
   - \subpage fspage 
   - \subpage hashdbpage
   - \subpage autopage
diff --git a/tsk/docs/pool.dox b/tsk/docs/pool.dox
new file mode 100644
index 0000000000000000000000000000000000000000..535930f21d1e5c0289c34e160bdff60b097efb3d
--- /dev/null
+++ b/tsk/docs/pool.dox
@@ -0,0 +1,17 @@
+/*! \page poolpage Pools
+
+This section is not complete.
+
+Pools can organize non-consecutive blocks into volumes. They were added to TSK as part of the APFS support. 
+
+Pools can exist:
+- At the beginning of a disk image
+- Inside of a single volume.
+- FUTURE: Across several volumes
+
+When a pool is detected, it will return several pool volumes.
+
+
+Back to \ref users_guide "Table of Contents"
+*/
+
diff --git a/tsk/fs/Makefile.am b/tsk/fs/Makefile.am
index c3723f8bf12c0f4f8f9cb8184d782b71376d3911..5cda2c37eaf89d32ed18cd17670709c510686178 100644
--- a/tsk/fs/Makefile.am
+++ b/tsk/fs/Makefile.am
@@ -17,7 +17,7 @@ libtskfs_la_SOURCES  = tsk_fs_i.h fs_inode.c fs_io.c fs_block.c fs_open.c \
     hfs.c hfs_dent.c hfs_journal.c hfs_unicompare.c decmpfs.c lzvn.c lzvn.h \
     dcalc_lib.c dcat_lib.c dls_lib.c dstat_lib.c ffind_lib.c \
     fls_lib.c icat_lib.c ifind_lib.c ils_lib.c usn_journal.c usnjls_lib.c \
-    walk_cpp.cpp yaffs.cpp \
+    walk_cpp.cpp yaffs.cpp logical_fs.cpp \
     apfs.cpp apfs_compat.cpp apfs_fs.cpp apfs_open.cpp
 
 indent:
diff --git a/tsk/fs/apfs.cpp b/tsk/fs/apfs.cpp
index 137e1d95b39934fb746b67f22e1306a8af7fdad3..229a04447f488c7c4f10eb33d9b224d10f8a3277 100644
--- a/tsk/fs/apfs.cpp
+++ b/tsk/fs/apfs.cpp
@@ -345,9 +345,9 @@ APFSFileSystem::APFSFileSystem(const APFSPool& pool,
   }
 }
 
-APFSFileSystem::wrapped_kek::wrapped_kek(Guid&& id,
+APFSFileSystem::wrapped_kek::wrapped_kek(TSKGuid&& id,
                                          const std::unique_ptr<uint8_t[]>& kp)
-    : uuid{std::forward<Guid>(id)} {
+    : uuid{std::forward<TSKGuid>(id)} {
   // Parse KEK
   wrapped_key_parser wp{kp.get()};
 
@@ -384,11 +384,11 @@ APFSFileSystem::APFSFileSystem(const APFSPool& pool,
 // These are the known special recovery UUIDs.  The ones that are commented out
 // are currently supported.
 static const auto unsupported_recovery_keys = {
-    Guid{"c064ebc6-0000-11aa-aa11-00306543ecac"},  // Institutional Recovery
-    Guid{"2fa31400-baff-4de7-ae2a-c3aa6e1fd340"},  // Institutional User
-    // Guid{"ebc6C064-0000-11aa-aa11-00306543ecac"},  // Personal Recovery
-    Guid{"64c0c6eb-0000-11aa-aa11-00306543ecac"},  // iCould Recovery
-    Guid{"ec1c2ad9-b618-4ed6-bd8d-50f361c27507"},  // iCloud User
+    TSKGuid{"c064ebc6-0000-11aa-aa11-00306543ecac"},  // Institutional Recovery
+    TSKGuid{"2fa31400-baff-4de7-ae2a-c3aa6e1fd340"},  // Institutional User
+    // TSKGuid{"ebc6C064-0000-11aa-aa11-00306543ecac"},  // Personal Recovery
+    TSKGuid{"64c0c6eb-0000-11aa-aa11-00306543ecac"},  // iCloud Recovery
+    TSKGuid{"ec1c2ad9-b618-4ed6-bd8d-50f361c27507"},  // iCloud User
 };
 
 void APFSFileSystem::init_crypto_info() {
@@ -1000,7 +1000,7 @@ APFSKeybag::APFSKeybag(const APFSPool& pool, const apfs_block_num block_num,
   }
 }
 
-std::unique_ptr<uint8_t[]> APFSKeybag::get_key(const Guid& uuid,
+std::unique_ptr<uint8_t[]> APFSKeybag::get_key(const TSKGuid& uuid,
                                                uint16_t type) const {
   if (kb()->num_entries == 0) {
     return nullptr;
diff --git a/tsk/fs/apfs_compat.cpp b/tsk/fs/apfs_compat.cpp
index 4afacd5cb48f97a194d4bcced74c9962425e33aa..9b6660ae80bb4366decbb1db576cadd21f9ae8a1 100755
--- a/tsk/fs/apfs_compat.cpp
+++ b/tsk/fs/apfs_compat.cpp
@@ -226,8 +226,8 @@ APFSFSCompat::APFSFSCompat(TSK_IMG_INFO* img_info, const TSK_POOL_INFO* pool_inf
   };
 
   _fsinfo.dir_open_meta = [](TSK_FS_INFO* fs, TSK_FS_DIR** a_fs_dir,
-                             TSK_INUM_T inode) {
-    return to_fs(fs).dir_open_meta(a_fs_dir, inode);
+                             TSK_INUM_T inode, int recursion_depth) {
+    return to_fs(fs).dir_open_meta(a_fs_dir, inode, recursion_depth);
   };
 
   _fsinfo.fscheck = [](TSK_FS_INFO*, FILE*) {
@@ -478,7 +478,8 @@ uint8_t tsk_apfs_fsstat(TSK_FS_INFO* fs_info, apfs_fsstat_info* info) try {
 }
 
 TSK_RETVAL_ENUM APFSFSCompat::dir_open_meta(TSK_FS_DIR** a_fs_dir,
-                                            TSK_INUM_T inode_num) const
+                                            TSK_INUM_T inode_num,
+                                            int recursion_depth) const
     noexcept try {
   // Sanity checks
   if (a_fs_dir == NULL) {
@@ -568,7 +569,7 @@ uint8_t APFSFSCompat::inode_walk(TSK_FS_INFO* fs, TSK_INUM_T start_inum, TSK_INU
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_WALK_RNG);
         tsk_error_set_errstr("inode_walk: end object id must be >= start object id: "
-            "%" PRIx32 " must be >= %" PRIx32 "",
+            "%" PRIuINUM " must be >= %" PRIuINUM "",
             end_inum, start_inum);
         return 1;
     }
@@ -698,7 +699,9 @@ uint8_t APFSFSCompat::file_add_meta(TSK_FS_FILE* fs_file, TSK_INUM_T addr) const
         fs_file->meta->link = (char*)tsk_malloc(attr->size + 1);
         tsk_fs_attr_read(attr, (TSK_OFF_T)0, fs_file->meta->link, attr->size,
                          TSK_FS_FILE_READ_FLAG_NONE);
-        fs_file->meta->link[attr->size] = 0;
+        if (fs_file->meta->link != NULL) {
+            fs_file->meta->link[attr->size] = 0;
+        }
         break;
       }
     }
@@ -1430,9 +1433,6 @@ uint8_t tsk_apfs_istat(TSK_FS_FILE* fs_file, apfs_istat_info* info) try {
  */
 TSK_FS_BLOCK_FLAG_ENUM APFSFSCompat::block_getflags(TSK_FS_INFO* fs, TSK_DADDR_T addr) {
 
-    TSK_FS_FILE *fs_file;
-    int result;
-
     if (fs->img_info->itype != TSK_IMG_TYPE_POOL) {
         // No way to return an error
         return TSK_FS_BLOCK_FLAG_UNALLOC;
@@ -1641,7 +1641,7 @@ uint8_t tsk_apfs_free_snapshot_list(apfs_snapshot_list* list) try {
     return 1;
   }
 
-  for (auto i = 0; i < list->num_snapshots; i++) {
+  for (size_t i = 0; i < list->num_snapshots; i++) {
     auto& snapshot = list->snapshots[i];
     delete[] snapshot.name;
   }
diff --git a/tsk/fs/apfs_compat.hpp b/tsk/fs/apfs_compat.hpp
index 4c40ddf5ac47752d97c1003b57424625c0d4ed64..e724e81e23c85115331a5bd2b4dc4d3a4fd152e3 100644
--- a/tsk/fs/apfs_compat.hpp
+++ b/tsk/fs/apfs_compat.hpp
@@ -56,5 +56,5 @@ class APFSFSCompat : public APFSJObjTree {
   uint8_t decrypt_block(TSK_DADDR_T, void*) noexcept;
   int name_cmp(const char*, const char*) const noexcept;
 
-  TSK_RETVAL_ENUM dir_open_meta(TSK_FS_DIR**, TSK_INUM_T) const noexcept;
+  TSK_RETVAL_ENUM dir_open_meta(TSK_FS_DIR**, TSK_INUM_T, int) const noexcept;
 };
diff --git a/tsk/fs/apfs_fs.cpp b/tsk/fs/apfs_fs.cpp
index 67c1b2071bce6c493fba9e9494fd95830b7d2ad8..e0a64a56dd79a5c818125c515d72427a4cf6e65c 100644
--- a/tsk/fs/apfs_fs.cpp
+++ b/tsk/fs/apfs_fs.cpp
@@ -74,33 +74,49 @@ void APFSJObject::add_entry(const jit::value_type& e) {
       _is_clone = (_inode.private_id != key->oid());
 
       // If there's more data than the size of the inode then we have xdata
-      if ((size_t)e.value.count() > sizeof(apfs_inode)) {
+      size_t e_offset = sizeof(apfs_inode);
+      size_t e_size = e.value.count();
+      // Need at least 4 bytes for start of extended fields (xf_blob_t)
+      if (e_size > sizeof(apfs_inode) + 4) {
         // The xfield headers are right after the inode
         const auto xfield = reinterpret_cast<const apfs_xfield*>(value + 1);
 
-        // The xfield data is after all of the xfield headers
-        auto xfield_data =
-            reinterpret_cast<const char*>(&xfield->entries[xfield->num_exts]);
+        e_offset += 4;
 
-        for (auto i = 0U; i < xfield->num_exts; i++) {
-          const auto& ext = xfield->entries[i];
+        // Need at least 4 bytes for each x_field_t
+        if (xfield->num_exts < (e_size - e_offset) / 4) {
+          // sizeof(xf_blob_t) + number of extended fields * sizeof(x_field_t)
+          e_offset += xfield->num_exts * 4;
 
-          switch (ext.type) {
-            case APFS_XFIELD_TYPE_NAME:
-              _name = std::string(xfield_data);
-              break;
-            case APFS_XFIELD_TYPE_DSTREAM: {
-              const auto ds =
-                  reinterpret_cast<const apfs_dstream*>(xfield_data);
+          // The xfield data is after all of the xfield headers
+          auto xfield_data =
+              reinterpret_cast<const char*>(&xfield->entries[xfield->num_exts]);
 
-              _size = ds->size;
-              _size_on_disk = ds->alloced_size;
-              break;
+          for (auto i = 0U; i < xfield->num_exts; i++) {
+            const auto& ext = xfield->entries[i];
+
+            switch (ext.type) {
+              case APFS_XFIELD_TYPE_NAME:
+                if((ext.len < 1) || (ext.len > e_size) || (e_offset > e_size - ext.len)) {
+                  break;
+                }
+                _name = std::string(xfield_data, ext.len - 1);
+                break;
+
+              case APFS_XFIELD_TYPE_DSTREAM: {
+                const auto ds =
+                    reinterpret_cast<const apfs_dstream*>(xfield_data);
+
+                _size = ds->size;
+                _size_on_disk = ds->alloced_size;
+                break;
+              }
             }
-          }
 
-          // The next data needs to be aligned properly
-          xfield_data += (ext.len + 7) & 0xFFF8;
+            // The next data needs to be aligned properly
+            xfield_data += (ext.len + 7) & 0xFFF8;
+            e_offset += (ext.len + 7) & 0xFFF8;
+          }
         }
       }
       break;
diff --git a/tsk/fs/exfatfs_dent.c b/tsk/fs/exfatfs_dent.c
index 714d88a21dc1a6aa8c34a96b1452f331508be3cc..6459e4a06dc97e21dc93d6dec08f25fb16c39885 100755
--- a/tsk/fs/exfatfs_dent.c
+++ b/tsk/fs/exfatfs_dent.c
@@ -475,15 +475,16 @@ exfats_parse_special_file_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY
  * be added.
  * @param a_buf Buffer that contains the directory contents.
  * @param a_buf_len Length of buffer in bytes (must be a multiple of sector
-*  size).
+ *  size).
  * @param a_sector_addrs Array where each element is the original address of
  * the corresponding sector in a_buf (size of array is number of sectors in
  * the directory).
+ * @param recursion_depth Recursion depth to limit the number of self-calls
  * @return TSK_RETVAL_ENUM
 */
 TSK_RETVAL_ENUM
 exfatfs_dent_parse_buf(FATFS_INFO *a_fatfs, TSK_FS_DIR *a_fs_dir, char *a_buf,
-    TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs)
+    TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs, int recursion_depth)
 {
     const char *func_name = "exfatfs_parse_directory_buf";
     TSK_FS_INFO *fs = NULL;
diff --git a/tsk/fs/ext2fs.c b/tsk/fs/ext2fs.c
index 29bed30ce79d2e726a1a21de0e64a613ab6dbb65..a7fe539590a72cdeed96b6be8193aadbea7eddcc 100755
--- a/tsk/fs/ext2fs.c
+++ b/tsk/fs/ext2fs.c
@@ -635,9 +635,9 @@ ext4_load_attrs_inline(TSK_FS_FILE *fs_file, const uint8_t * ea_buf, size_t ea_b
 
                 // This is the right attribute. Check that the length and offset are valid.
                 // The offset is from the beginning of the entries, i.e., four bytes into the buffer.
-                uint32_t offset = tsk_getu32(fs_file->fs_info->endian, ea_entry->val_off);
+                uint16_t offset = tsk_getu16(fs_file->fs_info->endian, ea_entry->val_off);
                 uint32_t size = tsk_getu32(fs_file->fs_info->endian, ea_entry->val_size);
-                if (4 + offset + size <= ea_buf_len) {
+                if ((ea_buf_len >= 4) && (offset < ea_buf_len - 4) && (size <= ea_buf_len - 4 - offset)) {
                     ea_inline_data = &(ea_buf[4 + offset]);
                     ea_inline_data_len = size;
                     break;
@@ -657,11 +657,16 @@ ext4_load_attrs_inline(TSK_FS_FILE *fs_file, const uint8_t * ea_buf, size_t ea_b
         }
     }
 
+    // Check if resident data size exceeds maximum inode size (1024) - ext4 inode resident data offset (156)
+    if ((fs_meta->size == 0) || (fs_meta->size > (1024 - 156))) {
+        return 1;
+    }
+
     // Combine the two parts of the inline data for the resident attribute. For now, make a
     // buffer for the full file size - this may be different than the length of the data 
     // from the inode if we have sparse data.
-    uint8_t *resident_data;
-    if ((resident_data = (uint8_t*)tsk_malloc(fs_meta->size)) == NULL) {
+    uint8_t *resident_data = (uint8_t*)tsk_malloc(fs_meta->size);
+    if (resident_data == NULL) {
         return 1;
     }
     memset(resident_data, 0, fs_meta->size);
@@ -673,7 +678,7 @@ ext4_load_attrs_inline(TSK_FS_FILE *fs_file, const uint8_t * ea_buf, size_t ea_b
     // If we need more data and found an extended attribute, append that data
     if ((fs_meta->size > EXT2_INLINE_MAX_DATA_LEN) && (ea_inline_data_len > 0)) {
         // Don't go beyond the size of the file
-        size_t ea_data_len = (inode_data_len + ea_inline_data_len < (uint64_t)fs_meta->size) ? inode_data_len + ea_inline_data_len : fs_meta->size - inode_data_len;
+        size_t ea_data_len = (ea_inline_data_len < (uint64_t)fs_meta->size - inode_data_len) ? ea_inline_data_len : fs_meta->size - inode_data_len;
         memcpy(resident_data + inode_data_len, ea_inline_data, ea_data_len);
     }
 
@@ -1665,7 +1670,7 @@ ext2fs_make_data_run_extent_index(TSK_FS_INFO * fs_info,
 
         // Ensure buf is sufficiently large
         // Otherwise extents[i] below can cause an OOB read
-        if ((fs_blocksize < sizeof(ext2fs_extent_header)) || (num_entries > (fs_blocksize - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent))) {
+        if (((unsigned long)fs_blocksize < sizeof(ext2fs_extent_header)) || (num_entries > (fs_blocksize - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent))) {
             free(buf);
             return 1;
         }
@@ -1684,7 +1689,7 @@ ext2fs_make_data_run_extent_index(TSK_FS_INFO * fs_info,
 
         // Ensure buf is sufficiently large
         // Otherwise indices[i] below can cause an OOB read
-        if ((fs_blocksize < sizeof(ext2fs_extent_header)) || (num_entries > (fs_blocksize - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent_idx))) {
+        if (((unsigned long)fs_blocksize < sizeof(ext2fs_extent_header)) || (num_entries > (fs_blocksize - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent_idx))) {
             free(buf);
             return 1;
         }
@@ -1715,7 +1720,7 @@ ext2fs_make_data_run_extent_index(TSK_FS_INFO * fs_info,
  */
 static int32_t
 ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info,
-    TSK_FS_META * fs_meta, ext2fs_extent_header * header)
+    TSK_FS_META * fs_meta, ext2fs_extent_header * header, int recursion_depth)
 {
     int fs_blocksize = fs_info->block_size;
     ext2fs_extent_idx *indices;
@@ -1723,6 +1728,13 @@ ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info,
     uint8_t *buf;
     int i;
 
+    // 32 is an arbitrary chosen value.
+    if (recursion_depth > 32) {
+        tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+        tsk_error_set_errstr
+            ("ext2fs_load_attrs: exceeded maximum recursion depth!");
+        return -1;
+    }
     if (tsk_getu16(fs_info->endian, header->eh_magic) != 0xF30A) {
         tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
         tsk_error_set_errstr
@@ -1734,7 +1746,8 @@ ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info,
         return 0;
     }
 
-    if ((buf = (uint8_t *) tsk_malloc(fs_blocksize)) == NULL) {
+    buf = (uint8_t *) tsk_malloc(fs_blocksize);
+    if (buf == NULL) {
         return -1;
     }
 
@@ -1756,12 +1769,14 @@ ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info,
             }
             tsk_error_set_errstr2("ext2fs_extent_tree_index_count: Block %"
                 PRIuDADDR, block);
+            free(buf);
             return -1;
         }
 
         if ((ret =
                 ext2fs_extent_tree_index_count(fs_info, fs_meta,
-                    (ext2fs_extent_header *) buf)) < 0) {
+                    (ext2fs_extent_header *) buf, recursion_depth + 1)) < 0) {
+            free(buf);
             return -1;
         }
         count += ret;
@@ -1936,7 +1951,7 @@ ext4_load_attrs_extents(TSK_FS_FILE *fs_file)
          }
         
         extent_index_size =
-        ext2fs_extent_tree_index_count(fs_info, fs_meta, header);
+        ext2fs_extent_tree_index_count(fs_info, fs_meta, header, 0);
         if (extent_index_size < 0) {
             return 1;
         }
diff --git a/tsk/fs/ext2fs_dent.c b/tsk/fs/ext2fs_dent.c
index 95e8b256961bdd504dcc91b34a2d4abeb2140e30..0c49e05e3481ad6f324e2fca31e7c467aa04c50a 100644
--- a/tsk/fs/ext2fs_dent.c
+++ b/tsk/fs/ext2fs_dent.c
@@ -232,16 +232,16 @@ ext2fs_dent_parse_block(EXT2FS_INFO * ext2fs, TSK_FS_DIR * a_fs_dir,
 * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
 * structure or a new structure.
 * @param a_addr Address of directory to process.
+* @param recursion_depth Recursion depth to limit the number of self-calls
 * @returns error, corruption, ok etc.
 */
 
 TSK_RETVAL_ENUM
 ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) a_fs;
     char *dirbuf;
-    TSK_OFF_T size;
     TSK_FS_DIR *fs_dir;
     TSK_LIST *list_seen = NULL;
 
@@ -316,12 +316,19 @@ ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
     if ((dirbuf = tsk_malloc((size_t)a_fs->block_size)) == NULL) {
         return TSK_ERR;
     }
+    TSK_OFF_T size = 0;
 
     if (fs_dir->fs_file->meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_INLINE) {
         // For inline dirs, don't try to read past the end of the data
         size = fs_dir->fs_file->meta->size;
     }
     else {
+        if (fs_dir->fs_file->meta->size <= 0 || a_fs->block_size <= 0
+                || (INT64_MAX - (a_fs->block_size - 1) < fs_dir->fs_file->meta->size)) {
+            tsk_error_set_errstr("ext2fs_dir_open_meta: invalid data size value out of bounds.\n");
+            free(dirbuf);
+            return TSK_ERR;
+        }
         size = roundup(fs_dir->fs_file->meta->size, a_fs->block_size);
     }
     TSK_OFF_T offset = 0;
diff --git a/tsk/fs/fatfs_dent.cpp b/tsk/fs/fatfs_dent.cpp
index c1e86f0558e4caa8d8151adec8682b26e8cf22c4..93d80bca90961b31f93b25bc165c32a759ba7843 100644
--- a/tsk/fs/fatfs_dent.cpp
+++ b/tsk/fs/fatfs_dent.cpp
@@ -219,12 +219,13 @@ static TSK_WALK_RET_ENUM
 * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
 * structure or a new structure.
 * @param a_addr Address of directory to process.
+* @param recursion_depth Recursion depth to limit the number of self-calls
 * @returns error, corruption, ok etc.
 */
 
 TSK_RETVAL_ENUM
     fatfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     const char *func_name = "fatfs_dir_open_meta";
     TSK_OFF_T size, len;
@@ -344,7 +345,7 @@ TSK_RETVAL_ENUM
         "%s: Parsing directory %" PRIuINUM "\n",
         func_name, a_addr);
 
-    retval = fatfs->dent_parse_buf(fatfs, fs_dir, dirbuf, len, addrbuf);
+    retval = fatfs->dent_parse_buf(fatfs, fs_dir, dirbuf, len, addrbuf, recursion_depth);
 
     free(dirbuf);
     free(addrbuf);
diff --git a/tsk/fs/fatfs_meta.c b/tsk/fs/fatfs_meta.c
index 6a21b3fe73f1150a9c211db68b11edca4ffb228e..dacbfb732f178e1282edfba87ff65620e7ad8a97 100755
--- a/tsk/fs/fatfs_meta.c
+++ b/tsk/fs/fatfs_meta.c
@@ -820,7 +820,11 @@ fatfs_make_data_runs(TSK_FS_FILE * a_fs_file)
                     if (tsk_verbose)
                         tsk_fprintf(stderr,
                             "Loop found while processing file\n");
-                    tsk_fs_attr_run_free(data_run_head);
+                    if (data_run_head != NULL ) {
+                      tsk_fs_attr_run_free(data_run_head);
+                      // Make sure to set data_run_head to NULL to prevent a use-after-free
+                      data_run_head = NULL;
+                    }
                     if (list_seen != NULL) {
                         tsk_list_free(list_seen);
                         list_seen = NULL;
@@ -1216,12 +1220,17 @@ fatfs_inode_walk(TSK_FS_INFO *a_fs, TSK_INUM_T a_start_inum,
             return 0;
         }
     }
+    size_t bitmap_len = (a_fs->block_count + 7) / 8;
+
+    // Taking 128 MiB as an arbitrary upper bound
+    if ((bitmap_len == 0) || (bitmap_len > (128 * 1024 * 1024))) {
+        tsk_fs_file_close(fs_file);
+        return 1;
+    }
 
     /* Allocate a bitmap to keep track of which sectors are allocated to
      * directories. */
-    if ((dir_sectors_bitmap =
-            (uint8_t*)tsk_malloc((size_t) ((a_fs->block_count +
-                        7) / 8))) == NULL) {
+    if ((dir_sectors_bitmap = (uint8_t*)tsk_malloc(bitmap_len)) == NULL) {
         tsk_fs_file_close(fs_file);
         return 1;
     }
diff --git a/tsk/fs/fatfs_utils.c b/tsk/fs/fatfs_utils.c
index 42e7fe57dc2214ee6e8eeaef4e9df6df0fb25bff..9495ac923e578992d505beb09a7f7ea2a3b918cd 100755
--- a/tsk/fs/fatfs_utils.c
+++ b/tsk/fs/fatfs_utils.c
@@ -149,7 +149,7 @@ fatfs_dos_2_unix_time(uint16_t date, uint16_t time, uint8_t timetens)
         tm1.tm_sec = 0;
 
     /* The ctimetens value has a range of 0 to 199 */
-    if (timetens > 100)
+    if (timetens >= 100)
         tm1.tm_sec++;
 
     tm1.tm_min = ((time & FATFS_MIN_MASK) >> FATFS_MIN_SHIFT);
diff --git a/tsk/fs/fatxxfs_dent.c b/tsk/fs/fatxxfs_dent.c
index a68c7c6f99a7e29a52358cc3f200891a7dbfc32d..ad191bafaf3c22a651eaf81e75bd7028127073cd 100755
--- a/tsk/fs/fatxxfs_dent.c
+++ b/tsk/fs/fatxxfs_dent.c
@@ -48,15 +48,16 @@ typedef struct {
  * be added.
  * @param buf Buffer that contains the directory contents.
  * @param len Length of buffer in bytes (must be a multiple of sector
-*  size).
+ *  size).
  * @param addrs Array where each element is the original address of
  * the corresponding sector in a_buf (size of array is number of sectors in
  * the directory).
+ * @param recursion_depth Recursion depth to limit the number of self-calls
  * @return TSK_RETVAL_ENUM
 */
 TSK_RETVAL_ENUM
 fatxxfs_dent_parse_buf(FATFS_INFO *fatfs, TSK_FS_DIR *a_fs_dir, char *buf,
-    TSK_OFF_T len, TSK_DADDR_T *addrs)
+    TSK_OFF_T len, TSK_DADDR_T *addrs, int recursion_depth)
 {
     char *func_name = "fatxxfs_dent_parse_buf";
     unsigned int idx = 0; 
@@ -370,12 +371,12 @@ fatxxfs_dent_parse_buf(FATFS_INFO *fatfs, TSK_FS_DIR *a_fs_dir, char *buf,
                         /* The parent directory is not in the list.  We are going to walk
                         * the directory until we hit this directory. This process will
                         * populate the buffer table and we will then rescan it */
-                        if (tsk_fs_dir_walk(fs, fs->root_inum,
+                        if (tsk_fs_dir_walk_internal(fs, fs->root_inum,
                             (TSK_FS_DIR_WALK_FLAG_ENUM)(TSK_FS_DIR_WALK_FLAG_ALLOC |
                             TSK_FS_DIR_WALK_FLAG_UNALLOC |
                             TSK_FS_DIR_WALK_FLAG_RECURSE),
                             fatfs_find_parent_act,
-                            (void *) &a_fs_dir->fs_file->meta->addr)) {
+                            (void *) &a_fs_dir->fs_file->meta->addr, recursion_depth)) {
                                 return TSK_OK;
                         }
 
diff --git a/tsk/fs/ffs_dent.c b/tsk/fs/ffs_dent.c
index a9e9aec334c72fb7343acb4c2bd7d2f6c83cd984..7a032ccda70e6074554e014a52f5efb9682b5561 100644
--- a/tsk/fs/ffs_dent.c
+++ b/tsk/fs/ffs_dent.c
@@ -222,11 +222,12 @@ ffs_dent_parse_block(FFS_INFO * ffs, TSK_FS_DIR * fs_dir, uint8_t a_is_del,
  * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
  * structure or a new structure.
  * @param a_addr Address of directory to process.
+ * @param recursion_depth Recursion depth to limit the number of self-calls
  * @returns error, corruption, ok etc.
  */
 TSK_RETVAL_ENUM
 ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     TSK_OFF_T size;
     FFS_INFO *ffs = (FFS_INFO *) a_fs;
diff --git a/tsk/fs/fs_attr.c b/tsk/fs/fs_attr.c
index 178e9719c17b10723a0fb8e4ac6f715b2155dd10..38047e3c5a43c638b7011a847bfe609d3d6c6dbf 100755
--- a/tsk/fs/fs_attr.c
+++ b/tsk/fs/fs_attr.c
@@ -33,6 +33,7 @@
  * They form a linked list and are added to the TSK_FS_META structure
  */
 #include "tsk_fs_i.h"
+#include "tsk_logical_fs.h"
 
 
 /**
@@ -923,9 +924,14 @@ tsk_fs_attr_walk_nonres(const TSK_FS_ATTR * fs_attr,
                 }
                 else {
                     ssize_t cnt;
-
-                    cnt = tsk_fs_read_block_decrypt
-                        (fs, addr + len_idx, buf, fs->block_size, fs_attr_run->crypto_id + len_idx);
+					if (fs->ftype == TSK_FS_TYPE_LOGICAL) {
+						// We can't read logical files directly from the image.
+						cnt = logicalfs_read_block(fs, fs_attr->fs_file, addr + len_idx, buf);
+					}
+					else {
+						cnt = tsk_fs_read_block_decrypt
+						(fs, addr + len_idx, buf, fs->block_size, fs_attr_run->crypto_id + len_idx);
+					}
                     if (cnt != fs->block_size) {
                         if (cnt >= 0) {
                             tsk_error_reset();
@@ -1068,7 +1074,6 @@ tsk_fs_attr_walk(const TSK_FS_ATTR * a_fs_attr,
     }
     // non-resident data
     else if (a_fs_attr->flags & TSK_FS_ATTR_NONRES) {
-		fflush(stderr);
         return tsk_fs_attr_walk_nonres(a_fs_attr, a_flags, a_action,
             a_ptr);
     }
@@ -1101,7 +1106,7 @@ tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset,
     TSK_FS_INFO *fs;
 
     if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL)
-        || (a_fs_attr->fs_file->fs_info == NULL)) {
+        || (a_fs_attr->fs_file->fs_info == NULL) || (a_buf == NULL)) {
         tsk_error_set_errno(TSK_ERR_FS_ARG);
         tsk_error_set_errstr
             ("tsk_fs_attr_read: Attribute has null pointers.");
@@ -1109,6 +1114,11 @@ tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset,
     }
     fs = a_fs_attr->fs_file->fs_info;
 
+	// Handle logical directories separately
+	if (fs->ftype == TSK_FS_TYPE_LOGICAL) {
+		return logicalfs_read(fs, a_fs_attr->fs_file, a_offset, a_len, a_buf);
+	}
+
     /* for compressed data, call the specialized function */
     if (a_fs_attr->flags & TSK_FS_ATTR_COMP) {
         if (a_fs_attr->r == NULL) {
@@ -1220,6 +1230,12 @@ tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset,
              * info out of order and we did not get all of the run info.  We
              * return 0s if data is read from this type of run. */
             else if (data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) {
+                if (a_buf == NULL) {
+                    tsk_error_reset();
+                    tsk_error_set_errno(TSK_ERR_FS_READ_OFF);
+                    tsk_error_set_errstr("tsk_fs_attr_read - missing a_buf");
+                    return -1;
+                }
                 memset(&a_buf[len_toread - len_remain], 0, len_inrun);
                 if (tsk_verbose)
                     fprintf(stderr,
@@ -1256,11 +1272,11 @@ tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset,
 
                 // add the byte offset in the block
                 fs_offset_b += byteoffset_toread;
+				cnt =
+					tsk_fs_read_decrypt(fs, fs_offset_b,
+						&a_buf[len_toread - len_remain], len_inrun,
+						data_run_cur->crypto_id + blkoffset_inrun);
 
-                cnt =
-                    tsk_fs_read_decrypt(fs, fs_offset_b,
-                    &a_buf[len_toread - len_remain], len_inrun, 
-                    data_run_cur->crypto_id + blkoffset_inrun);
                 if (cnt != (ssize_t)len_inrun) {
                     if (cnt >= 0) {
                         tsk_error_reset();
diff --git a/tsk/fs/fs_dir.c b/tsk/fs/fs_dir.c
index b80c0012fbaa0895f28c27fe24c01ef5f76d2db5..ac2f3d220bb29ab8efad7ebbc09cca63bc21a912 100644
--- a/tsk/fs/fs_dir.c
+++ b/tsk/fs/fs_dir.c
@@ -73,9 +73,12 @@ tsk_fs_dir_realloc(TSK_FS_DIR * a_fs_dir, size_t a_cnt)
     prev_cnt = a_fs_dir->names_alloc;
 
     a_fs_dir->names_alloc = a_cnt;
+
     if ((a_fs_dir->names =
-            (TSK_FS_NAME *) tsk_realloc((void *) a_fs_dir->names,
-                sizeof(TSK_FS_NAME) * a_fs_dir->names_alloc)) == NULL) {
+        (TSK_FS_NAME *)tsk_realloc((void *)a_fs_dir->names,
+            sizeof(TSK_FS_NAME) * a_fs_dir->names_alloc)) == NULL) {
+        a_fs_dir->names_alloc = 0;
+        a_fs_dir->names_used = 0;
         return 1;
     }
 
@@ -177,6 +180,10 @@ tsk_fs_dir_contains(TSK_FS_DIR * a_fs_dir, TSK_INUM_T meta_addr, uint32_t hash)
 static void 
 tsk_fs_dir_free_name_internal(TSK_FS_NAME *fs_name) 
 {
+    if (fs_name == NULL) {
+        return;
+    }
+
     if (fs_name->name) {
 	    free(fs_name->name);
 	    fs_name->name = NULL;
@@ -245,6 +252,15 @@ tsk_fs_dir_add(TSK_FS_DIR * a_fs_dir, const TSK_FS_NAME * a_fs_name)
     if (fs_name_dest == NULL) {
         // make sure we got the room
         if (a_fs_dir->names_used >= a_fs_dir->names_alloc) {
+
+			// Protect against trying to process very large directories
+			if (a_fs_dir->names_used >= MAX_DIR_SIZE_TO_PROCESS) {
+				tsk_error_reset();
+				tsk_error_set_errno(TSK_ERR_FS_GENFS);
+				tsk_error_set_errstr("tsk_fs_dir_add: Directory too large to process (addr: %" PRIuINUM ")", a_fs_dir->addr);
+				return 1;
+			}
+
             if (tsk_fs_dir_realloc(a_fs_dir, a_fs_dir->names_used + 512))
                 return 1;
         }
@@ -266,14 +282,17 @@ tsk_fs_dir_add(TSK_FS_DIR * a_fs_dir, const TSK_FS_NAME * a_fs_name)
 
 
 
-/** \ingroup fslib
+/** \internal
+* Internal version of the tsk_fs_dir_open_meta function that takes a macro recursion depth parameter.
+*
 * Open a directory (using its metadata addr) so that each of the files in it can be accessed.
 * @param a_fs File system to analyze
 * @param a_addr Metadata address of the directory to open
+* @param macro_recursion_depth Recursion depth to limit the number of calls if the underlying file system needs to call methods to resolve. 
 * @returns NULL on error
 */
-TSK_FS_DIR *
-tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr)
+static TSK_FS_DIR *
+tsk_fs_dir_open_meta_internal(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr, int macro_recursion_depth)
 {
     TSK_FS_DIR *fs_dir = NULL;
     TSK_RETVAL_ENUM retval;
@@ -282,11 +301,11 @@ tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr)
         || (a_fs->dir_open_meta == NULL)) {
         tsk_error_set_errno(TSK_ERR_FS_ARG);
         tsk_error_set_errstr
-            ("tsk_fs_dir_open_meta: called with NULL or unallocated structures");
+            ("tsk_fs_dir_open_meta_internal: called with NULL or unallocated structures");
         return NULL;
     }
 
-    retval = a_fs->dir_open_meta(a_fs, &fs_dir, a_addr);
+    retval = a_fs->dir_open_meta(a_fs, &fs_dir, a_addr, macro_recursion_depth);
     if (retval != TSK_OK) {
         tsk_fs_dir_close(fs_dir);
         return NULL;
@@ -296,6 +315,21 @@ tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr)
 }
 
 
+
+/** \ingroup fslib
+* Open a directory (using its metadata addr) so that each of the files in it can be accessed.
+*
+* @param a_fs File system to analyze
+* @param a_addr Metadata address of the directory to open
+* @returns NULL on error
+*/
+TSK_FS_DIR *
+tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr)
+{
+    return tsk_fs_dir_open_meta_internal(a_fs, a_addr, 0);
+}
+
+
 /** \ingroup fslib
 * Open a directory (using its path) so that each of the files in it can be accessed.
 * @param a_fs File system to analyze
@@ -356,10 +390,12 @@ tsk_fs_dir_close(TSK_FS_DIR * a_fs_dir)
         return;
     }
 
-    for (i = 0; i < a_fs_dir->names_used; i++) {
-        tsk_fs_dir_free_name_internal(&a_fs_dir->names[i]);
+    if (a_fs_dir->names != NULL) {
+        for (i = 0; i < a_fs_dir->names_used; i++) {
+            tsk_fs_dir_free_name_internal(&a_fs_dir->names[i]);
+        }
+        free(a_fs_dir->names);
     }
-    free(a_fs_dir->names);
 
     if (a_fs_dir->fs_file) {
         tsk_fs_file_close(a_fs_dir->fs_file);
@@ -486,7 +522,7 @@ tsk_fs_dir_get_name(const TSK_FS_DIR * a_fs_dir, size_t a_idx)
 #define DIR_STRSZ   4096
 
 /** \internal
- * used to keep state between calls to dir_walk_lcl
+ * used to keep state between calls to dir_walk_recurse
  */
 typedef struct {
     /* Recursive path stuff */
@@ -610,11 +646,11 @@ prioritizeDirNames(TSK_FS_NAME * names, size_t count, int * indexToOrderedIndex)
 }
 
 /* dir_walk local function that is used for recursive calls.  Callers
- * should initially call the non-local version. */
+ * should initially call the non-recursive version. */
 static TSK_WALK_RET_ENUM
-tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
+tsk_fs_dir_walk_recursive(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
     TSK_INUM_T a_addr, TSK_FS_DIR_WALK_FLAG_ENUM a_flags,
-    TSK_FS_DIR_WALK_CB a_action, void *a_ptr)
+    TSK_FS_DIR_WALK_CB a_action, void *a_ptr, int macro_recursion_depth)
 {
     TSK_FS_DIR *fs_dir;
     TSK_FS_FILE *fs_file;
@@ -622,7 +658,7 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
     int* indexToOrderedIndex = NULL;
 
     // get the list of entries in the directory
-    if ((fs_dir = tsk_fs_dir_open_meta(a_fs, a_addr)) == NULL) {
+    if ((fs_dir = tsk_fs_dir_open_meta_internal(a_fs, a_addr, macro_recursion_depth + 1)) == NULL) {
         return TSK_WALK_ERROR;
     }
 
@@ -779,14 +815,21 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
                 }
 
                 /* If we've exceeded the max depth or max length, don't
-                 * recurse any further into this directory */
+                 * recurse any further into this directory 
+                 * NOTE: We have two concepts of recursion detection in
+                 * here.  This one is based on within a top-level call
+                 * to dir_walk.  The macro_recursion_depth value allows
+                 * us to detect when file systems need to call dir_walk
+                 * to resolve things and they get into an infinite loop.
+                 * Perhaps they can be unified some day. 
+                 */
                 if ((a_dinfo->depth >= MAX_DEPTH) ||
                     (DIR_STRSZ <=
                         strlen(a_dinfo->dirs) +
                         strlen(fs_file->name->name))) {   
                     if (tsk_verbose) {
                         tsk_fprintf(stdout,
-                            "tsk_fs_dir_walk_lcl: directory : %"
+                            "tsk_fs_dir_walk_recursive: directory : %"
                             PRIuINUM " exceeded max length / depth\n", fs_file->name->meta_addr);
                     }
 
@@ -804,7 +847,7 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
                 strncpy(a_dinfo->didx[a_dinfo->depth],
                     fs_file->name->name,
                     DIR_STRSZ - strlen(a_dinfo->dirs));
-                strncat(a_dinfo->dirs, "/", DIR_STRSZ-1);
+                strncat(a_dinfo->dirs, "/", DIR_STRSZ - strlen(a_dinfo->dirs) - 1);
                 depth_added = 1;
                 a_dinfo->depth++;
 
@@ -817,15 +860,27 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
                     save_bak = a_dinfo->save_inum_named;
                     a_dinfo->save_inum_named = 0;
                 }
-                retval = tsk_fs_dir_walk_lcl(a_fs,
+                retval = tsk_fs_dir_walk_recursive(a_fs,
                     a_dinfo, fs_file->name->meta_addr, a_flags,
-                    a_action, a_ptr);
+                    a_action, a_ptr, macro_recursion_depth + 1);
                 if (retval == TSK_WALK_ERROR) {
-                    /* If this fails because the directory could not be
-                     * loaded, then we still continue */
+                    /* In most cases we want to continue if a directory 
+                     * did not load, but if we ran out
+                     * of memory we should stop */
+                    if (tsk_error_get_errno() & TSK_ERR_AUX) {
+                        tsk_fs_dir_close(fs_dir);
+                        fs_file->name = NULL;
+                        tsk_fs_file_close(fs_file);
+
+                        if (indexToOrderedIndex != NULL) {
+                            free(indexToOrderedIndex);
+                        }
+                        return TSK_WALK_ERROR;
+                    }
+
                     if (tsk_verbose) {
                         tsk_fprintf(stderr,
-                            "tsk_fs_dir_walk_lcl: error reading directory: %"
+                            "tsk_fs_dir_walk_recursive: error reading directory: %"
                             PRIuINUM "\n", fs_file->name->meta_addr);
                         tsk_error_print(stderr);
                     }
@@ -857,7 +912,7 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
             else {
                 if (tsk_verbose)
                     fprintf(stderr,
-                        "tsk_fs_dir_walk_lcl: Loop detected with address %"
+                        "tsk_fs_dir_walk_recursive: Loop detected with address %"
                         PRIuINUM, fs_file->name->meta_addr);
             }
         }
@@ -883,20 +938,23 @@ tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo,
 }
 
 
-/** \ingroup fslib
-* Walk the file names in a directory and obtain the details of the files via a callback.
+/** \internal
+* Internal version of the tsk_fs_dir_walk function with recursion depth.
+* This should be called by file systems when they need to start a new dir_walk
+* to resolve something and they may already be inside of a walk. 
 *
 * @param a_fs File system to analyze
 * @param a_addr Metadata address of the directory to analyze
 * @param a_flags Flags used during analysis
 * @param a_action Callback function that is called for each file name
 * @param a_ptr Pointer to data that is passed to the callback function each time
+* @param macro_recursion_depth Recursion depth to limit the number of self-calls in case the underlying file system also needs to make calls into dir_walk
 * @returns 1 on error and 0 on success
 */
 uint8_t
-tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
+tsk_fs_dir_walk_internal(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
     TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action,
-    void *a_ptr)
+    void *a_ptr, int macro_recursion_depth)
 {
     DENT_DINFO dinfo;
     TSK_WALK_RET_ENUM retval;
@@ -904,7 +962,17 @@ tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
     if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) {
         tsk_error_set_errno(TSK_ERR_FS_ARG);
         tsk_error_set_errstr
-            ("tsk_fs_dir_walk: called with NULL or unallocated structures");
+            ("tsk_fs_dir_walk_internal: called with NULL or unallocated structures");
+        return 1;
+    }
+
+    // 128 is a somewhat arbitrary value.
+    // https://github.com/sleuthkit/sleuthkit/issues/1859 identified
+    // an overflow with 240 levels of recursion with FAT
+    if (macro_recursion_depth > 128) {
+        tsk_error_set_errno(TSK_ERR_FS_ARG);
+        tsk_error_set_errstr
+            ("tsk_fs_dir_walk_internal: recursion depth exceeds maximum (%d)", macro_recursion_depth);
         return 1;
     }
 
@@ -930,8 +998,8 @@ tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
     }
     tsk_release_lock(&a_fs->list_inum_named_lock);
 
-    retval = tsk_fs_dir_walk_lcl(a_fs, &dinfo, a_addr, a_flags,
-        a_action, a_ptr);
+    retval = tsk_fs_dir_walk_recursive(a_fs, &dinfo, a_addr, a_flags,
+        a_action, a_ptr, macro_recursion_depth);
 
     // if we were saving the list of named files in the temp list,
     // then now save them to FS_INFO
@@ -957,6 +1025,24 @@ tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
 }
 
 
+/** \ingroup fslib
+* Walk the file names in a directory and obtain the details of the files via a callback.
+*
+* @param a_fs File system to analyze
+* @param a_addr Metadata address of the directory to analyze
+* @param a_flags Flags used during analysis
+* @param a_action Callback function that is called for each file name
+* @param a_ptr Pointer to data that is passed to the callback function each time
+* @returns 1 on error and 0 on success
+*/
+uint8_t
+tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
+    TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action,
+    void *a_ptr)
+{
+	return tsk_fs_dir_walk_internal(a_fs, a_addr, a_flags, a_action, a_ptr, 0);
+}
+
 /** \internal
 * Create a dummy NAME entry for the Orphan file virtual directory.
 * @param a_fs File system directory is for
@@ -1079,9 +1165,9 @@ tsk_fs_dir_load_inum_named(TSK_FS_INFO * a_fs)
      * specify UNALLOC only as a flag on the assumption that there will
      * be fewer callbacks for UNALLOC than ALLOC.
      */
-    if (tsk_fs_dir_walk(a_fs, a_fs->root_inum,
+    if (tsk_fs_dir_walk_internal(a_fs, a_fs->root_inum,
             TSK_FS_NAME_FLAG_UNALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE |
-            TSK_FS_DIR_WALK_FLAG_NOORPHAN, load_named_dir_walk_cb, NULL)) {
+            TSK_FS_DIR_WALK_FLAG_NOORPHAN, load_named_dir_walk_cb, NULL, 0)) {
         tsk_error_errstr2_concat
             ("- tsk_fs_dir_load_inum_named: identifying inodes allocated by file names");
         return TSK_ERR;
@@ -1221,10 +1307,10 @@ find_orphan_meta_walk_cb(TSK_FS_FILE * a_fs_file, void *a_ptr)
                 "find_orphan_meta_walk_cb: Going into directory %" PRIuINUM
                 " to mark contents as seen\n", a_fs_file->meta->addr);
 
-        if (tsk_fs_dir_walk(fs, a_fs_file->meta->addr,
+        if (tsk_fs_dir_walk_internal(fs, a_fs_file->meta->addr,
                 TSK_FS_DIR_WALK_FLAG_UNALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE
                 | TSK_FS_DIR_WALK_FLAG_NOORPHAN, load_orphan_dir_walk_cb,
-                data)) {
+                data, 0)) {
             tsk_error_errstr2_concat
                 (" - find_orphan_meta_walk_cb: identifying inodes allocated by file names");
             return TSK_WALK_ERROR;
@@ -1345,6 +1431,12 @@ tsk_fs_dir_find_orphans(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir)
     for (i = 0; i < a_fs_dir->names_used; i++) {
         if (tsk_list_find(data.orphan_subdir_list,
                 a_fs_dir->names[i].meta_addr)) {
+
+            // Unclear what should happen in this situation, but it can happen,
+            // so we skip over it for now.
+            if (a_fs_dir->names_used == i + 1) {
+                continue;
+            }
             if (a_fs_dir->names_used > 1) {
                 tsk_fs_name_copy(&a_fs_dir->names[i],
                     &a_fs_dir->names[a_fs_dir->names_used - 1]);
diff --git a/tsk/fs/fs_open.c b/tsk/fs/fs_open.c
index c3e96e4e3a2a6a2f2af63310f834b01da4320b52..f25523719122786036409c2448cdab26e20c1d0f 100644
--- a/tsk/fs/fs_open.c
+++ b/tsk/fs/fs_open.c
@@ -27,6 +27,7 @@
 #include "tsk_fs_i.h"
 #include "tsk/util/detect_encryption.h"
 #include "tsk/img/unsupported_types.h"
+#include "tsk/img/logical_img.h"
 
 /**
  * \file fs_open.c
@@ -146,7 +147,6 @@ tsk_fs_open_img_decrypt(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset,
         { "ISO9660",  iso9660_open, TSK_FS_TYPE_ISO9660_DETECT },
         { "APFS",     apfs_open_auto_detect,    TSK_FS_TYPE_APFS_DETECT }
     };
-
     if (a_img_info == NULL) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_ARG);
@@ -154,6 +154,21 @@ tsk_fs_open_img_decrypt(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset,
         return NULL;
     }
 
+	/* If the image is type IMG_DIR_INFO, then the file system is
+	 * automatically the logical directory file system type. It is an
+	 * error to try to use any other file system type in that case.
+	 */
+	if (a_img_info->itype == TSK_IMG_TYPE_LOGICAL) {
+		if (!(a_ftype == TSK_FS_TYPE_DETECT || a_ftype == TSK_FS_TYPE_LOGICAL)) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_ARG);
+			tsk_error_set_errstr("tsk_fs_open_img: Incompatible file system type given for logical file image");
+			return NULL;
+		}
+
+		return logical_fs_open(a_img_info);
+	}
+
     /* We will try different file systems ...
      * We need to try all of them in case more than one matches
      */
@@ -182,7 +197,7 @@ tsk_fs_open_img_decrypt(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset,
                     fs_first->close(fs_first);
                     fs_info->close(fs_info);
                     tsk_error_reset();
-                    tsk_error_set_errno(TSK_ERR_FS_UNKTYPE);
+                    tsk_error_set_errno(TSK_ERR_FS_MULTTYPE);
                     tsk_error_set_errstr(
                         "%s or %s", FS_OPENERS[i].name, name_first);
                     return NULL;
@@ -206,7 +221,7 @@ tsk_fs_open_img_decrypt(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset,
                     unsupportedSignatureFound = 1;
                     tsk_error_reset();
                     tsk_error_set_errno(TSK_ERR_IMG_UNSUPTYPE);
-                    tsk_error_set_errstr(imageType);
+                    tsk_error_set_errstr("%s", imageType);
                     free(imageType);
                 }
             }
@@ -217,11 +232,11 @@ tsk_fs_open_img_decrypt(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset,
                 if (result != NULL) {
                     if (result->encryptionType == ENCRYPTION_DETECTED_SIGNATURE) {
                         tsk_error_set_errno(TSK_ERR_FS_ENCRYPTED);
-                        tsk_error_set_errstr(result->desc);
+                        tsk_error_set_errstr("%s", result->desc);
                     }
                     else if (result->encryptionType == ENCRYPTION_DETECTED_ENTROPY) {
                         tsk_error_set_errno(TSK_ERR_FS_POSSIBLY_ENCRYPTED);
-                        tsk_error_set_errstr(result->desc);
+                        tsk_error_set_errstr("%s", result->desc);
                     }
                     else {
                         tsk_error_set_errno(TSK_ERR_FS_UNKTYPE);
diff --git a/tsk/fs/fs_types.c b/tsk/fs/fs_types.c
index eef6b4fae4c43afbcf978a6198b49586991a54f8..23981203a17456dee2b95a9dc9920acfd50b3e9b 100644
--- a/tsk/fs/fs_types.c
+++ b/tsk/fs/fs_types.c
@@ -48,6 +48,7 @@ static FS_TYPES fs_type_table[] = {
 #endif
     {"yaffs2", TSK_FS_TYPE_YAFFS2, "YAFFS2"},
     {"apfs", TSK_FS_TYPE_APFS, "APFS"},
+	{"logical", TSK_FS_TYPE_LOGICAL, "Logical Directory"},
     {"ufs", TSK_FS_TYPE_FFS_DETECT, "UFS (Auto Detection)"},
     {"raw", TSK_FS_TYPE_RAW, "Raw Data"}, // RAW == RAW_DETECT
     {"swap", TSK_FS_TYPE_SWAP, "Swap Space"}, // SWAP == SWAP_DETECT
diff --git a/tsk/fs/hfs.c b/tsk/fs/hfs.c
index 233268fa6c7faaeaec4b4d11b5b910923415f5ca..3acc7ffb2e1de62e3511661a3f28a69584ec147d 100644
--- a/tsk/fs/hfs.c
+++ b/tsk/fs/hfs.c
@@ -1020,8 +1020,10 @@ hfs_cat_traverse(HFS_INFO * hfs,
                  */
                 //                rec_cnid = tsk_getu32(fs->endian, key->file_id);
 
+                // The nodesize passed to the callback should contain the available node
+                // data size relative to the start of the key.
                 retval =
-                    a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, keylen, nodesize,
+                    a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, keylen, nodesize - rec_off,
                     cur_off + rec_off, ptr);
                 if (retval == HFS_BTREE_CB_LEAF_STOP) {
                     is_done = 1;
diff --git a/tsk/fs/hfs_dent.c b/tsk/fs/hfs_dent.c
index 54460f14bfe078ca31ef3edf85d849ecb79af46d..9a69dd7138bb0c0d87844403419692f3755357f3 100644
--- a/tsk/fs/hfs_dent.c
+++ b/tsk/fs/hfs_dent.c
@@ -258,6 +258,11 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
 
         /* This will link the folder to its parent, which is the ".." entry */
         else if (rec_type == HFS_FOLDER_THREAD) {
+            if ((nodesize < sizeof(hfs_thread)) || (rec_off2 > nodesize - sizeof(hfs_thread))) {
+                tsk_error_set_errno(TSK_ERR_FS_GENFS);
+                tsk_error_set_errstr("hfs_dir_open_meta: nodesize value out of bounds");
+                return HFS_BTREE_CB_ERR;
+            }
             hfs_thread *thread = (hfs_thread *) & rec_buf[rec_off2];
             strcpy(info->fs_name->name, "..");
             info->fs_name->meta_addr =
@@ -268,6 +273,11 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
 
         /* This is a folder in the folder */
         else if (rec_type == HFS_FOLDER_RECORD) {
+            if ((nodesize < sizeof(hfs_folder)) || (rec_off2 > nodesize - sizeof(hfs_folder))) {
+                tsk_error_set_errno(TSK_ERR_FS_GENFS);
+                tsk_error_set_errstr("hfs_dir_open_meta: nodesize value out of bounds");
+                return HFS_BTREE_CB_ERR;
+            }
             hfs_folder *folder = (hfs_folder *) & rec_buf[rec_off2];
 
             info->fs_name->meta_addr =
@@ -295,7 +305,7 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
 
         /* This is a normal file in the folder */
         else if (rec_type == HFS_FILE_RECORD) {
-            if ((nodesize < sizeof(hfs_file)) || (rec_off2 >= nodesize - sizeof(hfs_file))) {
+            if ((nodesize < sizeof(hfs_file)) || (rec_off2 > nodesize - sizeof(hfs_file))) {
                 tsk_error_set_errno(TSK_ERR_FS_GENFS);
                 tsk_error_set_errstr("hfs_dir_open_meta: nodesize value out of bounds");
                 return HFS_BTREE_CB_ERR;
@@ -384,11 +394,12 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
 * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
 * structure or a new structure.
 * @param a_addr Address of directory to process.
+* @param recursion_depth Recursion depth to limit the number of self-calls
 * @returns error, corruption, ok etc.
 */
 TSK_RETVAL_ENUM
 hfs_dir_open_meta(TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     HFS_INFO *hfs = (HFS_INFO *) fs;
     uint32_t cnid;              /* catalog node ID of the entry (= inum) */
diff --git a/tsk/fs/ils_lib.c b/tsk/fs/ils_lib.c
index 165bd13ab96344008edc46915c81974fbabd034d..aaf5f8741f4c5d02573755b33a6a20a0e989adbb 100755
--- a/tsk/fs/ils_lib.c
+++ b/tsk/fs/ils_lib.c
@@ -181,7 +181,7 @@ ils_mac_act(TSK_FS_FILE * fs_file, void *ptr)
     }
 
     /* ADD image and file name (if we have one) */
-    TFPRINTF(stdout, _TSK_T("0|<%s-"), data->image);
+    TFPRINTF(stdout, _TSK_T("0|<%" PRIttocTSK "-"), data->image);
     tsk_printf("%s%s%s-%" PRIuINUM ">|%" PRIuINUM "|",
         (fs_file->meta->name2) ? fs_file->meta->name2->name : "",
         (fs_file->meta->name2) ? "-" : "",
diff --git a/tsk/fs/iso9660.c b/tsk/fs/iso9660.c
index 3815517638aeea612adc6003858f6b55ba1590e7..64a142c3f8842113cab0d3f5036dbe4bb8aae7fe 100755
--- a/tsk/fs/iso9660.c
+++ b/tsk/fs/iso9660.c
@@ -556,6 +556,15 @@ iso9660_load_inodes_dir(TSK_FS_INFO * fs, TSK_OFF_T a_offs, int count,
                         in_node = NULL;
                         break;
                     }
+                    if (b_offs >= ISO9660_SSIZE_B - sizeof(iso9660_dentry)) {
+                        if (tsk_verbose)
+                            tsk_fprintf(stderr,
+                                        "iso9660_load_inodes_dir: b_offs out of bounds, bailing\n");
+                        free(in_node);
+                        in_node = NULL;
+                        break;
+                    }
+
 
                     name16 =
                         (UTF16 *) & buf[b_offs + sizeof(iso9660_dentry)];
@@ -570,13 +579,18 @@ iso9660_load_inodes_dir(TSK_FS_INFO * fs, TSK_OFF_T a_offs, int count,
                     }
                     name8 = (UTF8 *) in_node->inode.fn;
 
+                    if ((dentry->fi_len % 2) != 0 || dentry->fi_len > ISO9660_SSIZE_B - sizeof(iso9660_dentry) - b_offs) {
+                        if (tsk_verbose)
+                            tsk_fprintf(stderr,
+                                        "iso9660_load_inodes_dir: UTF-16 name length out of bounds, bailing\n");
+                        free(in_node);
+                        in_node = NULL;
+                        break;
+                    }
                     retVal =
                         tsk_UTF16toUTF8(fs->endian,
-                        (const UTF16 **) &name16,
-                        (UTF16 *) & buf[b_offs + sizeof(iso9660_dentry) +
-                            dentry->fi_len], &name8,
-                        (UTF8 *) ((uintptr_t) & in_node->inode.
-                            fn[ISO9660_MAXNAMLEN_STD]),
+                        (const UTF16 **) &name16, (UTF16 *) & buf[b_offs + sizeof(iso9660_dentry) + dentry->fi_len],
+                        &name8, (UTF8 *) ((uintptr_t) & in_node->inode.fn[ISO9660_MAXNAMLEN_STD]),
                         TSKlenientConversion);
                     if (retVal != TSKconversionOK) {
                         if (tsk_verbose)
@@ -627,14 +641,13 @@ iso9660_load_inodes_dir(TSK_FS_INFO * fs, TSK_OFF_T a_offs, int count,
                     file_ver = NULL;
                 }
 
-                // if no extension, remove the final '.'
-                if (in_node->inode.fn[strlen(in_node->inode.fn) - 1] ==
-                    '.')
-                    in_node->inode.fn[strlen(in_node->inode.fn) - 1] =
-                        '\0';
-                
-                
-                if (strlen(in_node->inode.fn) == 0) {
+                size_t name8_len = strnlen(in_node->inode.fn, ISO9660_MAXNAMLEN);
+                if (name8_len > 0 && in_node->inode.fn[name8_len - 1] == '.') {
+                    // if no extension, remove the final '.'
+                    in_node->inode.fn[name8_len - 1] = '\0';
+                    name8_len -= 1;
+                }
+                if (name8_len == 0) {
                     if (tsk_verbose)
                         tsk_fprintf(stderr,
                                     "iso9660_load_inodes_dir: length of name after processing is 0. bailing\n");
diff --git a/tsk/fs/iso9660_dent.c b/tsk/fs/iso9660_dent.c
index 34de246a422e63863ad0a0118c70e7981149740b..8765f2a33f0187652e24ce91286b723690b15de9 100644
--- a/tsk/fs/iso9660_dent.c
+++ b/tsk/fs/iso9660_dent.c
@@ -218,11 +218,12 @@ iso9660_proc_dir(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir, const char *buf,
  * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
  * structure or a new structure.
  * @param a_addr Address of directory to process.
+ * @param recursion_depth Recursion depth to limit the number of self-calls
  * @returns error, corruption, ok etc.
  */
 TSK_RETVAL_ENUM
 iso9660_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     TSK_RETVAL_ENUM retval;
     TSK_FS_DIR *fs_dir;
diff --git a/tsk/fs/logical_fs.cpp b/tsk/fs/logical_fs.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..897c465645545187f421045d5e894de477ca5c30
--- /dev/null
+++ b/tsk/fs/logical_fs.cpp
@@ -0,0 +1,1802 @@
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+/**
+*\file logical_fs.cpp
+* Contains the internal TSK logical file system functions.
+*/
+
+#include <vector>
+#include <map>
+#include <algorithm>
+#include <string>
+#include <set>
+#include <string.h>
+
+#include "tsk_fs_i.h"
+#include "tsk_fs.h"
+#include "tsk_logical_fs.h"
+#include "tsk/img/logical_img.h"
+
+#ifdef TSK_WIN32
+#include <Windows.h>
+#endif
+
+using std::vector;
+using std::string;
+using std::wstring;
+
+static uint8_t
+logicalfs_inode_walk(TSK_FS_INFO *fs, TSK_INUM_T start_inum,
+	TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags,
+	TSK_FS_META_WALK_CB a_action, void *a_ptr)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("inode_walk for logical directory is not implemented");
+	return 1;
+}
+
+static uint8_t
+logicalfs_block_walk(TSK_FS_INFO *a_fs, TSK_DADDR_T a_start_blk,
+	TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags,
+	TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("block_walk for logical directory is not implemented");
+	return 1;
+}
+
+static TSK_FS_BLOCK_FLAG_ENUM
+logicalfs_block_getflags(TSK_FS_INFO *fs, TSK_DADDR_T a_addr)
+{
+	return TSK_FS_BLOCK_FLAG_UNUSED;
+}
+
+static TSK_FS_ATTR_TYPE_ENUM
+logicalfs_get_default_attr_type(const TSK_FS_FILE * /*a_file*/)
+{
+	return TSK_FS_ATTR_TYPE_DEFAULT;
+}
+
+/*
+ * Convert a FILETIME to a timet
+ *
+ * @param ft The FILETIME to convert
+ *
+ * @return The converted timet
+ */
+#ifdef TSK_WIN32
+static time_t 
+filetime_to_timet(FILETIME const& ft) 
+{ 
+	ULARGE_INTEGER ull;    
+	ull.LowPart = ft.dwLowDateTime;    
+	ull.HighPart = ft.dwHighDateTime;    
+	return ull.QuadPart / 10000000ULL - 11644473600ULL; 
+}
+#endif
+
+/*
+ * Create a LOGICALFS_SEARCH_HELPER that will run a search for
+ * the given inum.
+ *
+ * @param target_inum The inum to search for
+ *
+ * @return The search helper object (must be freed by caller)
+ */
+static LOGICALFS_SEARCH_HELPER*
+create_inum_search_helper(TSK_INUM_T target_inum) {
+	LOGICALFS_SEARCH_HELPER *helper = (LOGICALFS_SEARCH_HELPER *)tsk_malloc(sizeof(LOGICALFS_SEARCH_HELPER));
+	if (helper == NULL)
+		return NULL;
+
+	helper->target_found = false;
+	helper->search_type = LOGICALFS_SEARCH_BY_INUM;
+	helper->target_path = NULL;
+	helper->target_inum = target_inum;
+	helper->found_path = NULL;
+	return helper;
+}
+
+/*
+* Create a LOGICALFS_SEARCH_HELPER that will run a search over
+* the entire image. Used to find the max inum.
+*
+* @return The search helper object (must be freed by caller)
+*/
+static LOGICALFS_SEARCH_HELPER*
+create_max_inum_search_helper() {
+	LOGICALFS_SEARCH_HELPER *helper = (LOGICALFS_SEARCH_HELPER *)tsk_malloc(sizeof(LOGICALFS_SEARCH_HELPER));
+	if (helper == NULL)
+		return NULL;
+
+	helper->target_found = false;
+	helper->search_type = LOGICALFS_NO_SEARCH;
+	helper->target_path = NULL;
+	helper->found_path = NULL;
+	return helper;
+}
+
+/*
+* Create a LOGICALFS_SEARCH_HELPER that will run a search for
+* the given path.
+*
+* @param target_path The path to search for
+*
+* @return The search helper object (must be freed by caller)
+*/
+static LOGICALFS_SEARCH_HELPER*
+create_path_search_helper(const TSK_TCHAR *target_path) {
+	LOGICALFS_SEARCH_HELPER *helper = (LOGICALFS_SEARCH_HELPER *)tsk_malloc(sizeof(LOGICALFS_SEARCH_HELPER));
+	if (helper == NULL)
+		return NULL;
+
+	helper->target_found = false;
+	helper->search_type = LOGICALFS_SEARCH_BY_PATH;
+	helper->target_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(target_path) + 1));
+	TSTRNCPY(helper->target_path, target_path, TSTRLEN(target_path) + 1);
+	helper->found_inum = LOGICAL_INVALID_INUM;
+	helper->found_path = NULL;
+	return helper;
+}
+
+/*
+ * Free the search helper object
+ *
+ * @param helper The object to free
+ */
+static void
+free_search_helper(LOGICALFS_SEARCH_HELPER* helper) {
+	if (helper->target_path != NULL) {
+		free(helper->target_path);
+	}
+	if (helper->found_path != NULL) {
+		free(helper->found_path);
+	}
+	free(helper);
+}
+
+/*
+ * Convert a wide string to UTF8.
+ * 
+ * @param source The wide string to convert.
+ * 
+ * @return The converted string (must be freed by caller) or "INVALID FILE NAME" if conversion fails. NULL if memory allocation fails.
+ */
+#ifdef TSK_WIN32
+static char*
+convert_wide_string_to_utf8(const wchar_t *source) {
+
+	const char invalidName[] = "INVALID FILE NAME";
+	UTF16 *utf16 = (UTF16 *)source;
+	size_t ilen = wcslen(source);
+	size_t maxUTF8len = ilen * 4;
+	if (maxUTF8len < strlen(invalidName) + 1) {
+		maxUTF8len = strlen(invalidName) + 1;
+	}
+	char *dest = (char*)tsk_malloc(maxUTF8len);
+	if (dest == NULL) {
+		return NULL;
+	}
+	UTF8 *utf8 = (UTF8*)dest;
+
+	TSKConversionResult retVal =
+		tsk_UTF16toUTF8_lclorder((const UTF16 **)&utf16,
+			&utf16[ilen], &utf8,
+			&utf8[maxUTF8len], TSKlenientConversion);
+
+	if (retVal != TSKconversionOK) {
+		// If the conversion failed, use a default name
+		if (tsk_verbose)
+			tsk_fprintf(stderr,
+				"convert_wide_string_to_utf8: error converting logical file name to UTF-8\n");
+		strncpy(dest, invalidName, strlen(invalidName) + 1);
+	}
+	return dest;
+}
+#endif
+
+/*
+ * Check if we should set the type as directory.
+ * We currently treat sym links as regular files to avoid
+ * issues trying to read them as directories.
+ */
+ #ifdef TSK_WIN32
+int
+shouldTreatAsDirectory(DWORD dwFileAttributes) {
+	return ((dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+		&& (!(dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)));
+}
+#endif
+
+/*
+ * Use data in the WIN32_FIND_DATA to populate a TSK_FS_FILE object.
+ * Expects a_fs_file and a_fs_file->meta to be allocated
+ *
+ * @param fd        The find data results
+ * @param a_fs_file The file to populate
+ *
+ * @return TSK_OK if successful, TSK_ERR otherwise
+ */
+#ifdef TSK_WIN32
+TSK_RETVAL_ENUM
+populate_fs_file_from_win_find_data(const WIN32_FIND_DATA* fd, TSK_FS_FILE * a_fs_file) {
+
+	if (a_fs_file == NULL || a_fs_file->meta == NULL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("populate_fs_file_from_win_find_data - a_fs_file argument not initialized");
+		return TSK_ERR;
+	}
+
+	// For the current use case, we leave the timestamps set to zero.
+	//a_fs_file->meta->crtime = filetime_to_timet(fd->ftCreationTime);
+	//a_fs_file->meta->atime = filetime_to_timet(fd->ftLastAccessTime);
+	//a_fs_file->meta->mtime = filetime_to_timet(fd->ftLastWriteTime);
+
+	// Set the type
+	if (shouldTreatAsDirectory(fd->dwFileAttributes)) {
+		a_fs_file->meta->type = TSK_FS_META_TYPE_DIR;
+	}
+	else {
+		a_fs_file->meta->type = TSK_FS_META_TYPE_REG;
+	}
+
+	// All files are allocated
+	a_fs_file->meta->flags = TSK_FS_META_FLAG_ALLOC;
+
+	// Set the file size
+	LARGE_INTEGER ull;
+	ull.LowPart = fd->nFileSizeLow;
+	ull.HighPart = fd->nFileSizeHigh;
+	a_fs_file->meta->size = ull.QuadPart;
+
+	return TSK_OK;
+}
+#endif
+
+/*
+ * Create the wildcard search path used to find directory contents
+ *
+ * @param base_path The path to the directory to open
+ *
+ * @return The search path with wildcard appended (must be freed by caller)
+ */
+TSK_TCHAR * create_search_path(const TSK_TCHAR *base_path) {
+	size_t len = TSTRLEN(base_path);
+	TSK_TCHAR * searchPath;
+	size_t searchPathLen = len + 4;
+	searchPath = (TSK_TCHAR *)tsk_malloc(sizeof(TSK_TCHAR) * (searchPathLen));
+	if (searchPath == NULL) {
+		return NULL;
+	}
+
+#ifdef TSK_WIN32
+	TSTRNCPY(searchPath, base_path, len + 1);
+	TSTRNCAT(searchPath, L"\\*", 4);
+#else
+	TSTRNCPY(searchPath, base_path, len + 1);
+	TSTRNCAT(searchPath, "/*", 3);
+#endif
+	return searchPath;
+}
+
+/*
+* Create the wildcard search path used to find directory contents using
+* the absolute directory and unicode prefix. We only call this method for
+* long paths because it does not work in cygwin - prepending "\\?\" only
+* works for absolute paths starting with a drive letter.
+*
+* @param base_path The path to the directory to open
+*
+* @return The search path with wildcard appended (must be freed by caller)
+*/
+TSK_TCHAR * create_search_path_long_path(const TSK_TCHAR *base_path) {
+#ifdef TSK_WIN32
+
+	// First convert the base path to an absolute path
+	TCHAR absPath[LOGICAL_MAX_PATH_UNICODE];
+	int ret = GetFullPathNameW(base_path, LOGICAL_MAX_PATH_UNICODE, absPath, NULL);
+
+	size_t len = TSTRLEN(absPath);
+	TSK_TCHAR * searchPath;
+	size_t searchPathLen = len + 9;
+	searchPath = (TSK_TCHAR *)tsk_malloc(sizeof(TSK_TCHAR) * (searchPathLen));
+	if (searchPath == NULL) {
+		return NULL;
+	}
+
+	TSTRNCPY(searchPath, L"\\\\?\\", 5);
+	TSTRNCAT(searchPath, absPath, len + 1);
+	TSTRNCAT(searchPath, L"\\*", 4);
+	return searchPath;
+#else
+	// Nothing to do here if it's not Windows
+	return NULL;
+#endif
+}
+
+/*
+ * Load the names of child files and/or directories into the given vectors.
+ *
+ * @param base_path  The parent path
+ * @param file_names Will be populated with file names contained in the parent dir (if requested)
+ * @param dir_names  Will be populated with dir names contained in the parent dir (if requested)
+ * @param mode       Specifies whether files, directories, or both should be loaded
+ *
+ * @return TSK_OK if successful, TSK_ERR otherwise
+ */
+#ifdef TSK_WIN32
+static TSK_RETVAL_ENUM
+load_dir_and_file_lists_win(const TSK_TCHAR *base_path, vector<wstring>& file_names, vector<wstring>& dir_names, LOGICALFS_DIR_LOADING_MODE mode) {
+
+	WIN32_FIND_DATAW fd;
+	HANDLE hFind;
+
+	// Create the search string (base path + "\*")
+	TSK_TCHAR * search_path_wildcard = create_search_path(base_path);
+	if (search_path_wildcard == NULL) {
+		return TSK_ERR;
+	}
+
+	// If the paths is too long, attempt to make a different version that will work
+	if (TSTRLEN(search_path_wildcard) >= MAX_PATH) {
+		free(search_path_wildcard);
+		search_path_wildcard = create_search_path_long_path(base_path);
+		if (search_path_wildcard == NULL) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("load_dir_and_file_lists: Error looking up contents of directory (path too long) %" PRIttocTSK, base_path);
+			return TSK_ERR;
+		}
+	}
+
+	// Look up all files and folders in the base directory 
+	hFind = ::FindFirstFileW(search_path_wildcard, &fd);
+	if (hFind != INVALID_HANDLE_VALUE) {
+		do {
+			if (shouldTreatAsDirectory(fd.dwFileAttributes)) {
+				if (mode == LOGICALFS_LOAD_ALL || mode == LOGICALFS_LOAD_DIRS_ONLY) {
+					// For the moment at least, skip . and ..
+					if (0 != wcsncmp(fd.cFileName, L"..", 3) && 0 != wcsncmp(fd.cFileName, L".", 3)) {
+						dir_names.push_back(fd.cFileName);
+					}
+				}
+			}
+			else {
+				if (mode == LOGICALFS_LOAD_ALL || mode == LOGICALFS_LOAD_FILES_ONLY) {
+					// For now, consider everything else to be a file
+					file_names.push_back(fd.cFileName);
+				}
+			}
+		} while (::FindNextFileW(hFind, &fd));
+		::FindClose(hFind);
+		free(search_path_wildcard);
+		return TSK_OK;
+	}
+	else {
+		free(search_path_wildcard);
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_GENFS);
+		tsk_error_set_errstr("load_dir_and_file_lists: Error looking up contents of directory %" PRIttocTSK, base_path);
+		return TSK_ERR;
+	}
+}
+#endif
+
+/*
+ * Finds closest cache match for the given path.
+ * If best_path is not NULL, caller must free.
+ *
+ * @param logical_fs_info The logical file system
+ * @param target_path     The full path being searched for
+ * @param best_path       The best match found in the cache (NULL if none are found, must be freed by caller otherwise)
+ * @param best_inum       The inum matching the best path found
+ *
+ * @return TSK_ERR if an error occurred, TSK_OK otherwise
+ */
+static TSK_RETVAL_ENUM
+find_closest_path_match_in_cache(LOGICALFS_INFO *logical_fs_info, TSK_TCHAR *target_path, TSK_TCHAR **best_path, TSK_INUM_T *best_inum) {
+	TSK_IMG_INFO* img_info = logical_fs_info->fs_info.img_info;
+	IMG_LOGICAL_INFO* logical_img_info = (IMG_LOGICAL_INFO*)img_info;
+	tsk_take_lock(&(img_info->cache_lock));
+
+	*best_inum = LOGICAL_INVALID_INUM;
+	*best_path = NULL;
+	int best_match_index = -1;
+	size_t longest_match = 0;
+	size_t target_len = TSTRLEN(target_path);
+	for (int i = 0; i < LOGICAL_INUM_CACHE_LEN; i++) {
+		if (logical_img_info->inum_cache[i].path != NULL) {
+
+			// Check that:
+			// - We haven't already found the exact match (longest_match = target_len)
+			// - The cache entry could potentially be a longer match than what we have so far
+			// - The cache entry isn't longer than what we're looking for
+			size_t cache_path_len = TSTRLEN(logical_img_info->inum_cache[i].path);
+			if ((longest_match != target_len) && (cache_path_len > longest_match) && (cache_path_len <= target_len)) {
+				size_t matching_len;
+#ifdef TSK_WIN32
+				matching_len = 0;
+				if (0 == _wcsnicmp(target_path, logical_img_info->inum_cache[i].path, cache_path_len)) {
+					matching_len = cache_path_len;
+				}
+#endif
+				// Save this path if:
+				// - It is longer than our previous best match
+				// - It is either the full length of the path we're searching for or is a valid
+				//      substring of our path
+				if ((matching_len > longest_match) &&
+					((matching_len == target_len) || (((matching_len < target_len) && (target_path[matching_len] == L'/'))))) {
+
+					// We found the full path or a partial match
+					longest_match = matching_len;
+					best_match_index = i;
+
+					// For the moment, consider any potential best match to have been useful. We could
+					// change this to only reset the age of the actual best match.
+					logical_img_info->inum_cache[i].cache_age = LOGICAL_INUM_CACHE_MAX_AGE;
+				}
+				else {
+					// The cache entry was not useful so decrease the age
+					if (logical_img_info->inum_cache[i].cache_age > 1) {
+						logical_img_info->inum_cache[i].cache_age--;
+					}
+				}
+			}
+			else {
+				// The cache entry was not useful so decrease the age
+				if (logical_img_info->inum_cache[i].cache_age > 1) {
+					logical_img_info->inum_cache[i].cache_age--;
+				}
+			}
+		}
+	}
+
+	// If we found a full or partial match, store the values
+	if (best_match_index >= 0) {
+		*best_inum = logical_img_info->inum_cache[best_match_index].inum;
+		*best_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(logical_img_info->inum_cache[best_match_index].path) + 1));
+		if (*best_path == NULL) {
+			tsk_release_lock(&(img_info->cache_lock));
+			return TSK_ERR;
+		}
+		TSTRNCPY(*best_path, logical_img_info->inum_cache[best_match_index].path, TSTRLEN(logical_img_info->inum_cache[best_match_index].path) + 1);
+	}
+
+	tsk_release_lock(&(img_info->cache_lock));
+	return TSK_OK;
+}
+
+/*
+ * Look up the path corresponding to the given inum in the cache.
+ * Returned path must be freed by caller.
+ * 
+ * @param logical_fs_info The logical file system
+ * @param target_inum     The inum we're searching for
+ *
+ * @return The path corresponding to the given inum or NULL if not found or an error occurred. Must be freed by caller.
+ */
+static TSK_TCHAR*
+find_path_for_inum_in_cache(LOGICALFS_INFO *logical_fs_info, TSK_INUM_T target_inum) {
+	TSK_IMG_INFO* img_info = logical_fs_info->fs_info.img_info;
+	IMG_LOGICAL_INFO* logical_img_info = (IMG_LOGICAL_INFO*)img_info;
+	tsk_take_lock(&(img_info->cache_lock));
+	TSK_TCHAR *target_path = NULL;
+	for (int i = 0; i < LOGICAL_INUM_CACHE_LEN; i++) {
+		if ((target_path == NULL) && (logical_img_info->inum_cache[i].inum == target_inum)) {
+			// The cache entry was useful so reset the age
+			logical_img_info->inum_cache[i].cache_age = LOGICAL_INUM_CACHE_MAX_AGE;
+
+			// Copy the path
+			target_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(logical_img_info->inum_cache[i].path) + 1));
+			if (target_path == NULL) {
+				tsk_release_lock(&(img_info->cache_lock));
+				return NULL;
+			}
+			TSTRNCPY(target_path, logical_img_info->inum_cache[i].path, TSTRLEN(logical_img_info->inum_cache[i].path) + 1);
+		}
+		else {
+			// The cache entry was not useful so decrease the age
+			if (logical_img_info->inum_cache[i].cache_age > 1) {
+				logical_img_info->inum_cache[i].cache_age--;
+			}
+		}
+	}
+
+	tsk_release_lock(&(img_info->cache_lock));
+	return target_path;
+}
+
+/*
+ * Add a directory to the cache
+ *
+ * @param logical_fs_info The logical file system
+ * @param path            The directory path
+ * @param inum            The inum corresponding to the path
+ *
+ * @return TSK_OK if successful, TSK_ERR on error
+ */
+static TSK_RETVAL_ENUM
+add_directory_to_cache(LOGICALFS_INFO *logical_fs_info, const TSK_TCHAR *path, TSK_INUM_T inum) {
+
+	// If the path is very long then don't cache it to make sure the cache stays reasonably small.
+	if (TSTRLEN(path) > LOGICAL_INUM_CACHE_MAX_PATH_LEN) {
+		return TSK_OK;
+	}
+
+	TSK_IMG_INFO* img_info = logical_fs_info->fs_info.img_info;
+	IMG_LOGICAL_INFO* logical_img_info = (IMG_LOGICAL_INFO*)img_info;
+	tsk_take_lock(&(img_info->cache_lock));
+
+	// Find the next cache slot. If we find an unused slot, use that. Otherwise find the entry
+	// with the lowest age.
+	int next_slot = 0;
+	int lowest_age = LOGICAL_INUM_CACHE_MAX_AGE + 1;
+	for (int i = 0; i < LOGICAL_INUM_CACHE_LEN; i++) {
+		if (logical_img_info->inum_cache[i].inum == LOGICAL_INVALID_INUM) {
+			next_slot = i;
+			break;
+		}
+
+		if (logical_img_info->inum_cache[i].cache_age < lowest_age) {
+			next_slot = i;
+			lowest_age = logical_img_info->inum_cache[i].cache_age;
+		}
+	}
+	clear_inum_cache_entry(logical_img_info, next_slot);
+
+	// Copy the data
+	logical_img_info->inum_cache[next_slot].path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(path) + 1));
+	if (logical_img_info->inum_cache[next_slot].path == NULL) {
+		tsk_release_lock(&(img_info->cache_lock));
+		return TSK_ERR;
+	}
+	TSTRNCPY(logical_img_info->inum_cache[next_slot].path, path, TSTRLEN(path) + 1);
+	logical_img_info->inum_cache[next_slot].inum = inum;
+	logical_img_info->inum_cache[next_slot].cache_age = LOGICAL_INUM_CACHE_MAX_AGE;
+
+	tsk_release_lock(&(img_info->cache_lock));
+	return TSK_OK;
+}
+
+/*
+ * Main recursive method for walking the directories. Will load and sort all directories found
+ * in parent_path, assign an inum to each and check if this is what we're searching for, calling
+ * this method recursively if not.
+ *
+ * @param parent_path The full path on disk to the directory to open
+ * @param last_inum_ptr Pointer to the last assigned inum. Will be updated for every directory found
+ * @param search_helper Contains information on what type of search is being performed and will store the results in most cases.
+ *
+ * @return TSK_OK if successful, TSK_ERR otherwise
+ */
+static TSK_RETVAL_ENUM
+search_directory_recursive(LOGICALFS_INFO *logical_fs_info, const TSK_TCHAR * parent_path, TSK_INUM_T *last_inum_ptr, LOGICALFS_SEARCH_HELPER* search_helper) {
+
+#ifdef TSK_WIN32
+	vector<wstring> file_names;
+	vector<wstring> dir_names;
+#else
+	vector<string> file_names;
+	vector<string> dir_names;
+#endif
+
+	// If we're searching for a file and this is the correct directory, load only the files in the folder and
+	// return the correct one.
+	if (search_helper->search_type == LOGICALFS_SEARCH_BY_INUM
+		&& (*last_inum_ptr == (search_helper->target_inum & 0xffff0000))
+		&& ((search_helper->target_inum & 0xffff) != 0)) {
+
+#ifdef TSK_WIN32
+		if (TSK_OK != load_dir_and_file_lists_win(parent_path, file_names, dir_names, LOGICALFS_LOAD_FILES_ONLY)) {
+			// Error message already set
+			return TSK_ERR;
+		}
+#endif
+		sort(file_names.begin(), file_names.end());
+
+		// Look for the file corresponding to the given inum
+		size_t file_index = (search_helper->target_inum & 0xffff) - 1;
+		if (file_names.size() <= file_index) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
+			tsk_error_set_errstr("search_directory_recursive - inum %" PRIuINUM " not found", search_helper->target_inum);
+			return TSK_ERR;
+		}
+
+		search_helper->target_found = true;
+		size_t found_path_len = TSTRLEN(parent_path) + 1 + TSTRLEN(file_names[file_index].c_str());
+		search_helper->found_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (found_path_len + 1));
+		TSTRNCPY(search_helper->found_path, parent_path, TSTRLEN(parent_path) + 1);
+#ifdef TSK_WIN32
+		TSTRNCAT(search_helper->found_path, L"\\", 2);
+#else
+		TSTRNCAT(search_helper->found_path, "/", 2);
+#endif
+		TSTRNCAT(search_helper->found_path, file_names[file_index].c_str(), TSTRLEN(file_names[file_index].c_str()) + 1);
+		return TSK_OK;
+	}
+
+#ifdef TSK_WIN32
+	if (TSK_OK != load_dir_and_file_lists_win(parent_path, file_names, dir_names, LOGICALFS_LOAD_DIRS_ONLY)) {
+		// Error message already set
+		return TSK_ERR;
+	}
+#endif
+
+	// Sort the directory names
+	sort(dir_names.begin(), dir_names.end());
+		
+	// Set up the beginning of full path to the file on disk
+	// The directory name being added should generally be less than 270 characters, but if necessary we will
+	// make more space available.
+	size_t allocated_dir_name_len = 270;
+	TSK_TCHAR* current_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(parent_path) + 2 + allocated_dir_name_len));
+	if (current_path == NULL)
+		return TSK_ERR;
+	TSTRNCPY(current_path, parent_path, TSTRLEN(parent_path) + 1);
+#ifdef TSK_WIN32
+	TSTRNCAT(current_path, L"\\", 2);
+#else
+	TSTRNCAT(current_path, "/", 2);
+#endif
+	size_t parent_path_len = TSTRLEN(current_path);
+
+	for (size_t i = 0; i < dir_names.size();i++) {
+
+		// If we don't have space for this name, increase the size of the buffer
+		if (TSTRLEN(dir_names[i].c_str()) > allocated_dir_name_len) {
+			free(current_path);
+			allocated_dir_name_len = TSTRLEN(dir_names[i].c_str()) + 20;
+			current_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(parent_path) + 2 + allocated_dir_name_len));
+			if (current_path == NULL)
+				return TSK_ERR;
+			TSTRNCPY(current_path, parent_path, TSTRLEN(parent_path) + 1);
+#ifdef TSK_WIN32
+			TSTRNCAT(current_path, L"\\", 2);
+#else
+			TSTRNCAT(current_path, "/", 2);
+#endif
+		}
+
+		// Append the current directory name to the parent path
+		TSTRNCPY(current_path + parent_path_len, dir_names[i].c_str(), TSTRLEN(dir_names[i].c_str()) + 1);
+		TSK_INUM_T current_inum = *last_inum_ptr + LOGICAL_INUM_DIR_INC;
+		*last_inum_ptr = current_inum;
+		add_directory_to_cache(logical_fs_info, current_path, current_inum);
+
+		// Check if we've found it
+		if ((search_helper->search_type == LOGICALFS_SEARCH_BY_PATH)
+			&& (TSTRCMP(current_path, search_helper->target_path) == 0)) {
+			search_helper->target_found = true;
+			search_helper->found_inum = current_inum;
+			free(current_path);
+			return TSK_OK;
+		}
+
+		if ((search_helper->search_type == LOGICALFS_SEARCH_BY_INUM)
+				&& (current_inum == search_helper->target_inum)) {
+
+			search_helper->target_found = true;
+			search_helper->found_path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(current_path) + 1));
+			if (search_helper->found_path == NULL)
+				return TSK_ERR;
+			TSTRNCPY(search_helper->found_path, current_path, TSTRLEN(current_path) + 1);
+			free(current_path);
+			return TSK_OK;
+		}
+
+		TSK_RETVAL_ENUM result = search_directory_recursive(logical_fs_info, current_path, last_inum_ptr, search_helper);
+		if (result != TSK_OK) {
+			free(current_path);
+			return result;
+		}
+		if (search_helper->target_found) {
+			free(current_path);
+			return TSK_OK;
+		}
+	}
+	free(current_path);
+	return TSK_OK;
+}
+
+/*
+ * Find the path corresponding to the given inum
+ *
+ * @param logical_fs_info The logical file system
+ * @param a_addr          The inum to search for
+ *
+ * @return The path corresponding to the inum. Null on error. Must be freed by caller.
+ */
+static TSK_TCHAR *
+load_path_from_inum(LOGICALFS_INFO *logical_fs_info, TSK_INUM_T a_addr) {
+
+	TSK_TCHAR *path = NULL;
+	if (a_addr == logical_fs_info->fs_info.root_inum) {
+		// No need to do a search - it's just the root folder
+		path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(logical_fs_info->base_path) + 1));
+		if (path == NULL)
+			return NULL;
+		TSTRNCPY(path, logical_fs_info->base_path, TSTRLEN(logical_fs_info->base_path) + 1);
+		return path;
+	}
+
+	// Default starting position for the search is the base folder
+	TSK_INUM_T starting_inum = logical_fs_info->fs_info.root_inum;
+	const TSK_TCHAR *starting_path = logical_fs_info->base_path;
+
+	// See if the directory is in the cache
+	TSK_INUM_T dir_addr = a_addr & 0xffff0000;
+	TSK_TCHAR *cache_path = find_path_for_inum_in_cache(logical_fs_info, dir_addr);
+	if (cache_path != NULL) {
+		if (dir_addr == a_addr) {
+			// If we were looking for a directory, we're done
+			return cache_path;
+		}
+
+		// Otherwise, set up the search parameters to start with the folder found
+		starting_inum = dir_addr;
+		starting_path = cache_path;
+
+	}
+
+	// Create the struct that holds search params and results
+	LOGICALFS_SEARCH_HELPER *search_helper = create_inum_search_helper(a_addr);
+	if (search_helper == NULL) {
+		return NULL;
+	}
+
+	// Run the search
+	TSK_RETVAL_ENUM result = search_directory_recursive(logical_fs_info, starting_path, &starting_inum, search_helper);
+
+	if (cache_path != NULL) {
+		free(cache_path);
+	}
+
+	if ((result != TSK_OK) || (!search_helper->target_found)) {
+		free_search_helper(search_helper);
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
+		tsk_error_set_errstr("load_path_from_inum - failed to find path corresponding to inum %" PRIuINUM, search_helper->target_inum);
+		return NULL;
+	}
+
+	// Copy the path
+	path = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) * (TSTRLEN(search_helper->found_path) + 1));
+	if (path == NULL) {
+		free_search_helper(search_helper);
+		return NULL;
+	}
+	TSTRNCPY(path, search_helper->found_path, TSTRLEN(search_helper->found_path) + 1);
+	free_search_helper(search_helper);
+	return path;
+}
+
+static uint8_t
+logicalfs_file_add_meta(TSK_FS_INFO *a_fs, TSK_FS_FILE * a_fs_file,
+	TSK_INUM_T inum)
+{
+	LOGICALFS_INFO *logical_fs_info = (LOGICALFS_INFO*)a_fs;
+	if (a_fs_file == NULL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_file_add_meta - null TSK_FS_FILE given");
+		return TSK_ERR;
+	}
+	if (a_fs_file->meta == NULL) {
+		if ((a_fs_file->meta = tsk_fs_meta_alloc(0)) == NULL) {
+			return TSK_ERR;
+		}
+	}
+	else {
+		tsk_fs_meta_reset(a_fs_file->meta);
+	}
+
+	a_fs_file->meta->addr = inum;
+	
+	// Get the full path to the given file
+	TSK_TCHAR* path  = load_path_from_inum(logical_fs_info, inum);
+	if (path == NULL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_INODE_NUM);
+		tsk_error_set_errstr("logicalfs_file_add_meta - Error loading directory with inum %" PRIuINUM, inum);
+		return TSK_ERR;
+	}
+
+#ifdef TSK_WIN32
+	// Load the file
+	WIN32_FIND_DATAW fd;
+	HANDLE hFind;
+	if (TSTRLEN(path) < MAX_PATH) {
+		hFind = ::FindFirstFileW(path, &fd);
+	} 
+	else {
+		TCHAR absPath[LOGICAL_MAX_PATH_UNICODE + 4];
+		TSTRNCPY(absPath, L"\\\\?\\", 4);
+		int absPathLen = GetFullPathNameW(path, LOGICAL_MAX_PATH_UNICODE, &(absPath[4]), NULL);
+		if (absPathLen <= 0) {
+			free(path);
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("logicalfs_file_add_meta: Error looking up contents of directory (path too long) %" PRIttocTSK, path);
+			return TSK_ERR;
+		}
+		hFind = ::FindFirstFileW(absPath, &fd);
+	}
+
+	if (hFind != INVALID_HANDLE_VALUE) {
+
+		TSK_RETVAL_ENUM result = populate_fs_file_from_win_find_data(&fd, a_fs_file);
+		::FindClose(hFind);
+		free(path);
+		return result;
+	}
+	else {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_GENFS);
+		tsk_error_set_errstr("logicalfs_file_add_meta: Error loading directory %" PRIttocTSK, path);
+		free(path);
+		return TSK_ERR;
+	}
+#endif
+	free(path);
+	return TSK_OK;
+}
+
+/*
+* Find the max inum in the logical image
+*
+* @param logical_fs_info The logical file system
+*
+* @return The max inum, or LOGICAL_INVALID_INUM if an error occurred
+*/
+static TSK_INUM_T
+find_max_inum(LOGICALFS_INFO *logical_fs_info) {
+
+	// Create the struct that holds search params and results
+	LOGICALFS_SEARCH_HELPER *search_helper = create_max_inum_search_helper();
+	if (search_helper == NULL) {
+		return LOGICAL_INVALID_INUM;
+	}
+
+	// Run the search to get the maximum directory inum
+	TSK_INUM_T last_assigned_inum = logical_fs_info->fs_info.root_inum;
+	TSK_RETVAL_ENUM result = search_directory_recursive(logical_fs_info, logical_fs_info->base_path, &last_assigned_inum, search_helper);
+	free_search_helper(search_helper);
+
+	if (result != TSK_OK) {
+		return LOGICAL_INVALID_INUM;
+	}
+
+	// The maximum inum will be the inum of the last file in that folder. We don't care which file it is, 
+	// so just getting a count is sufficient. First we need the path on disk corresponding to the last
+	// directory inum.
+	TSK_TCHAR* path = load_path_from_inum(logical_fs_info, last_assigned_inum);
+	if (path == NULL) {
+		return LOGICAL_INVALID_INUM;
+	}
+
+	// Finally we need to get a count of files in that last folder. The max inum is the 
+	// folder inum plus the number of files (if none, it'll just be the folder inum).
+#ifdef TSK_WIN32
+	vector<wstring> file_names;
+	vector<wstring> dir_names;
+	if (TSK_OK != load_dir_and_file_lists_win(path, file_names, dir_names, LOGICALFS_LOAD_FILES_ONLY)) {
+		free(path);
+		return LOGICAL_INVALID_INUM;
+	}
+#else
+	vector<string> file_names;
+	vector<string> dir_names;
+#endif
+	free(path);
+	last_assigned_inum += file_names.size();
+	return last_assigned_inum;
+}
+
+/*
+* Find the inum corresponding to the given path
+*
+* @param logical_fs_info The logical file system
+* @param a_addr          The inum to search for
+* @param base_path       Will be loaded with path corresponding to the inum
+* @param base_path_len   Size of base_path
+*
+* @return The corresponding inum, or LOGICAL_INVALID_INUM if an error occurs
+*/
+static TSK_INUM_T
+#ifdef TSK_WIN32
+get_inum_from_directory_path(LOGICALFS_INFO *logical_fs_info, TSK_TCHAR *base_path, wstring& dir_path) {
+#else
+get_inum_from_directory_path(LOGICALFS_INFO *logical_fs_info, TSK_TCHAR *base_path, string& dir_path) {
+#endif
+
+	// Get the full path on disk by combining the base path for the logical image with the relative path in dir_path
+	size_t len = TSTRLEN(base_path) + dir_path.length() + 1;
+	TSK_TCHAR *path_buf = (TSK_TCHAR*)tsk_malloc(sizeof(TSK_TCHAR) *(len + 2));
+	if (path_buf == NULL) {
+		// tsk_malloc has already set the error fields
+		return LOGICAL_INVALID_INUM;
+	}
+	TSTRNCPY(path_buf, base_path, TSTRLEN(base_path) + 1);
+#ifdef TSK_WIN32
+	TSTRNCAT(path_buf, L"\\", 2);
+#else
+	TSTRNCAT(path_buf, "/", 2);
+#endif
+	TSTRNCAT(path_buf, dir_path.c_str(), TSTRLEN(dir_path.c_str()) + 1);
+
+	// Default starting position for search is the base folder
+	TSK_INUM_T starting_inum = logical_fs_info->fs_info.root_inum;
+	const TSK_TCHAR *starting_path = logical_fs_info->base_path;
+
+	// See how close we can get using the cache
+	TSK_TCHAR *cache_path = NULL;
+	TSK_INUM_T cache_inum = LOGICAL_INVALID_INUM;
+	TSK_RETVAL_ENUM result = find_closest_path_match_in_cache(logical_fs_info, path_buf, &cache_path, &cache_inum);
+	if (result != TSK_OK) {
+		free(path_buf);
+		return LOGICAL_INVALID_INUM;
+	}
+	if (cache_inum != LOGICAL_INVALID_INUM) {
+		if (TSTRCMP(path_buf, cache_path) == 0) {
+			// We found an exact match - no need to do a search
+			free(cache_path);
+			free(path_buf);
+			return cache_inum;
+		}
+		// Otherwise, we at least have a better place to start the search
+		starting_inum = cache_inum;
+		starting_path = cache_path;
+	}
+
+	// Create the struct that holds search params and results.
+	// The helper copies what it needs, so path_buf can be freed here.
+	LOGICALFS_SEARCH_HELPER *search_helper = create_path_search_helper(path_buf);
+	free(path_buf);
+	if (search_helper == NULL) {
+		if (cache_path != NULL) {
+			free(cache_path);
+		}
+		return LOGICAL_INVALID_INUM;
+	}
+
+	// Run the search starting from the closest known directory
+	result = search_directory_recursive(logical_fs_info, starting_path, &starting_inum, search_helper);
+
+	if (cache_path != NULL) {
+		free(cache_path);
+	}
+
+	// Return the target inum if found
+	TSK_INUM_T target_inum;
+	if ((result != TSK_OK) || (!search_helper->target_found)) {
+		target_inum = LOGICAL_INVALID_INUM;
+	}
+	else {
+		target_inum = search_helper->found_inum;
+	}
+	free_search_helper(search_helper);
+	return target_inum;
+}
+
+/**
+* Populate a TSK_FS_DIR structure for the directory at the given metadata
+* address by listing the corresponding folder on disk.
+*
+* @param a_fs             File system
+* @param a_fs_dir         In/out: directory structure to populate (allocated here if *a_fs_dir is NULL)
+* @param a_addr           Metadata address of the directory (low 16 bits must be zero)
+* @param recursion_depth  Unused here; part of the generic dir_open_meta interface
+*
+* @return TSK_OK on success, TSK_ERR on error
+*/
+static TSK_RETVAL_ENUM
+logicalfs_dir_open_meta(TSK_FS_INFO *a_fs, TSK_FS_DIR ** a_fs_dir,
+	TSK_INUM_T a_addr, int recursion_depth)
+{
+	TSK_FS_DIR *fs_dir;
+	LOGICALFS_INFO *logical_fs_info = (LOGICALFS_INFO*)a_fs;
+
+	if (a_fs_dir == NULL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_dir_open_meta: NULL fs_dir argument given");
+		return TSK_ERR;
+	}
+	// Check the invalid inum marker first so it gets the more specific
+	// error message than the generic directory check below.
+	if (a_addr == LOGICAL_INVALID_INUM) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_dir_open_meta: Inode %" PRIuINUM " is not valid", a_addr);
+		return TSK_ERR;
+	}
+	// Directory inums have zero in the low 16 bits (files use the low bits)
+	if ((a_addr & 0xffff) != 0) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_dir_open_meta: Inode %" PRIuINUM " is not a directory", a_addr);
+		return TSK_ERR;
+	}
+
+	// Reuse the caller's TSK_FS_DIR if one was supplied, otherwise allocate
+	fs_dir = *a_fs_dir;
+	if (fs_dir) {
+		tsk_fs_dir_reset(fs_dir);
+		fs_dir->addr = a_addr;
+	}
+	else if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) {
+		return TSK_ERR;
+	}
+	
+	// Load the base path for the given meta address
+	TSK_TCHAR* path = load_path_from_inum(logical_fs_info, a_addr);
+	if (path == NULL) {
+		return TSK_ERR;
+	}
+
+#ifdef TSK_WIN32
+	// Populate the fs_file field
+	WIN32_FIND_DATAW fd;
+	HANDLE hFind;
+	if (TSTRLEN(path) < MAX_PATH) {
+		hFind = ::FindFirstFileW(path, &fd);
+	}
+	else {
+		// Long paths need the \\?\ prefix and an absolute path
+		TCHAR absPath[LOGICAL_MAX_PATH_UNICODE + 4];
+		TSTRNCPY(absPath, L"\\\\?\\", 4);
+		int absPathLen = GetFullPathNameW(path, LOGICAL_MAX_PATH_UNICODE, &(absPath[4]), NULL);
+		if (absPathLen <= 0) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			// Set the error string before freeing the path it references
+			tsk_error_set_errstr("logicalfs_dir_open_meta: Error looking up contents of directory (path too long) %" PRIttocTSK, path);
+			free(path);
+			return TSK_ERR;
+		}
+		hFind = ::FindFirstFileW(absPath, &fd);
+	}
+	if (hFind != INVALID_HANDLE_VALUE) {
+
+		if ((fs_dir->fs_file = tsk_fs_file_alloc(a_fs)) == NULL) {
+			free(path);
+			return TSK_ERR;
+		}
+
+		if ((fs_dir->fs_file->meta = tsk_fs_meta_alloc(0)) == NULL) {
+			free(path);
+			return TSK_ERR;
+		}
+
+		TSK_RETVAL_ENUM result = populate_fs_file_from_win_find_data(&fd, fs_dir->fs_file); 
+		::FindClose(hFind);
+
+		if (result != TSK_OK) {
+			// Error message already set
+			free(path);
+			return TSK_ERR;
+		}
+		
+	}
+	else {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_GENFS);
+		tsk_error_set_errstr("logicalfs_dir_open_meta: Error loading directory %" PRIttocTSK, path);
+		free(path);
+		return TSK_ERR;
+	}
+#endif
+
+	// Load the names of all files and subfolders in this directory
+#ifdef TSK_WIN32
+	vector<wstring> file_names;
+	vector<wstring> dir_names;
+	if (TSK_OK != load_dir_and_file_lists_win(path, file_names, dir_names, LOGICALFS_LOAD_ALL)) {
+		// Error message already set
+		free(path);
+		return TSK_ERR;
+	}
+#else
+	vector<string> file_names;
+	vector<string> dir_names;
+#endif
+
+	// Sort the files and directories
+	sort(file_names.begin(), file_names.end());
+	sort(dir_names.begin(), dir_names.end());
+
+	// Add the folders
+	for (auto it = begin(dir_names); it != end(dir_names); ++it) {
+		TSK_INUM_T dir_inum = get_inum_from_directory_path(logical_fs_info, path, *it);
+		if (dir_inum == LOGICAL_INVALID_INUM) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("logicalfs_dir_open_meta: Error looking up inum from path");
+			free(path);
+			return TSK_ERR;
+		}
+
+		TSK_FS_NAME *fs_name;
+
+#ifdef TSK_WIN32
+		char *utf8Name = convert_wide_string_to_utf8(it->c_str());
+		if (utf8Name == NULL) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_UNICODE);
+			tsk_error_set_errstr("logicalfs_dir_open_meta: Error converting wide string");
+			free(path);
+			return TSK_ERR;
+		}
+		size_t name_len = strlen(utf8Name);
+#else
+		size_t name_len = strlen(it->c_str());
+#endif
+		if ((fs_name = tsk_fs_name_alloc(name_len, 0)) == NULL) {
+#ifdef TSK_WIN32
+			free(utf8Name);
+#endif
+			free(path);
+			return TSK_ERR;
+		}
+
+		fs_name->type = TSK_FS_NAME_TYPE_DIR;
+		fs_name->flags = TSK_FS_NAME_FLAG_ALLOC;
+		fs_name->par_addr = a_addr;
+		fs_name->meta_addr = dir_inum;
+#ifdef TSK_WIN32
+		strncpy(fs_name->name, utf8Name, name_len);
+		free(utf8Name);
+#else
+		strncpy(fs_name->name, it->c_str(), name_len);
+#endif
+		if (tsk_fs_dir_add(fs_dir, fs_name)) {
+			tsk_fs_name_free(fs_name);
+			free(path);
+			return TSK_ERR;
+		}
+		tsk_fs_name_free(fs_name);
+	}
+	// The on-disk path is no longer needed
+	free(path);
+
+	// Add the files
+	TSK_INUM_T file_inum = a_addr | 1; // First inum is directory inum in the high part, 1 in the low part
+	for (auto it = begin(file_names); it != end(file_names); ++it) {
+		TSK_FS_NAME *fs_name;
+		size_t name_len;
+#ifdef TSK_WIN32
+		char *utf8Name = convert_wide_string_to_utf8(it->c_str());
+		if (utf8Name == NULL) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_UNICODE);
+			tsk_error_set_errstr("logicalfs_dir_open_meta: Error converting wide string");
+			return TSK_ERR;
+		}
+		name_len = strlen(utf8Name);
+#else
+		name_len = it->length();
+#endif
+		if ((fs_name = tsk_fs_name_alloc(name_len, 0)) == NULL) {
+#ifdef TSK_WIN32
+			free(utf8Name);
+#endif
+			return TSK_ERR;
+		}
+
+		fs_name->type = TSK_FS_NAME_TYPE_REG;
+		fs_name->flags = TSK_FS_NAME_FLAG_ALLOC;
+		fs_name->par_addr = a_addr;
+		fs_name->meta_addr = file_inum;
+#ifdef TSK_WIN32
+		strncpy(fs_name->name, utf8Name, name_len);
+		free(utf8Name);
+#else
+		strncpy(fs_name->name, it->c_str(), name_len);
+#endif
+		if (tsk_fs_dir_add(fs_dir, fs_name)) {
+			tsk_fs_name_free(fs_name);
+			return TSK_ERR;
+		}
+		tsk_fs_name_free(fs_name);
+
+		file_inum++;
+	}
+
+	return TSK_OK;
+}
+
+/**
+* Load the attributes for a logical file. A single non-resident attribute
+* is created whose run list covers the entire file, one block per
+* block_size bytes (no run is added for zero-length files).
+*
+* @param file File to load attributes for
+*
+* @returns 1 on error and 0 on success
+*/
+static uint8_t
+logicalfs_load_attrs(TSK_FS_FILE *file)
+{
+	if (file == NULL || file->meta == NULL || file->fs_info == NULL)
+	{
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr
+		("logicalfs_load_attrs: called with NULL pointers");
+		return 1;
+	}
+
+	TSK_FS_META* meta = file->meta;
+
+	// See if we have already loaded the runs
+	if ((meta->attr != NULL)
+		&& (meta->attr_state == TSK_FS_META_ATTR_STUDIED)) {
+		return 0;
+	}
+	else if (meta->attr_state == TSK_FS_META_ATTR_ERROR) {
+		return 1;
+	}
+	else if (meta->attr != NULL) {
+		// Reuse the existing list after marking its entries unused
+		tsk_fs_attrlist_markunused(meta->attr);
+	}
+	else {
+		meta->attr = tsk_fs_attrlist_alloc();
+		if (meta->attr == NULL) {
+			meta->attr_state = TSK_FS_META_ATTR_ERROR;
+			return 1;
+		}
+	}
+
+	TSK_FS_ATTR_RUN *data_run;
+	TSK_FS_ATTR *attr = tsk_fs_attrlist_getnew(meta->attr, TSK_FS_ATTR_NONRES);
+	if (attr == NULL) {
+		meta->attr_state = TSK_FS_META_ATTR_ERROR;
+		return 1;
+	}
+
+	if (meta->size == 0) {
+		// Zero-length files get no data run
+		data_run = NULL;
+	}
+	else {
+		data_run = tsk_fs_attr_run_alloc();
+		if (data_run == NULL) {
+			meta->attr_state = TSK_FS_META_ATTR_ERROR;
+			return 1;
+		}
+
+		// One run spanning the whole file, rounded up to whole blocks
+		data_run->next = NULL;
+		data_run->offset = 0;
+		data_run->addr = 0;
+		data_run->len = (meta->size + file->fs_info->block_size - 1) / file->fs_info->block_size;
+		data_run->flags = TSK_FS_ATTR_RUN_FLAG_NONE;
+	}
+
+	if (tsk_fs_attr_set_run(file, attr, NULL, NULL,
+		TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT,
+		meta->size, meta->size, 
+		roundup(meta->size, file->fs_info->block_size),
+		(TSK_FS_ATTR_FLAG_ENUM)0, 0)) {
+
+		meta->attr_state = TSK_FS_META_ATTR_ERROR;
+		return 1;
+	}
+
+	// If the file has size zero, return now
+	if (meta->size == 0) {
+		meta->attr_state = TSK_FS_META_ATTR_STUDIED;
+		return 0;
+	}
+
+	// Otherwise add the data run
+	if (0 != tsk_fs_attr_add_run(file->fs_info, attr, data_run)) {
+		meta->attr_state = TSK_FS_META_ATTR_ERROR;
+		return 1;
+	}
+	meta->attr_state = TSK_FS_META_ATTR_STUDIED;
+
+	return 0;
+}
+
+/*
+ * Reads a block from a logical file. If the file is not long enough to complete the block,
+ * null bytes are padded on to the end of the bytes read.
+ *
+ * @param a_fs         File system
+ * @param a_fs_file    File being read
+ * @param a_block_num  Number of the block to read (starting offset is this times the block size)
+ * @param buf          Holds bytes read from the file (should be the size of a block)
+ *
+ * @return Size of the block or -1 on error.
+ */
+ssize_t 
+logicalfs_read_block(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_DADDR_T a_block_num, char *buf) {
+
+	if ((a_fs == NULL) || (a_fs_file == NULL) || (a_fs_file->meta == NULL)) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logical_fs_read_block: Called with null arguments");
+		return -1;
+	}
+
+	if (a_fs->ftype != TSK_FS_TYPE_LOGICAL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logical_fs_read_block: Called with files system that is not TSK_FS_TYPE_LOGICAL");
+		return -1;
+	}
+
+	unsigned int block_size = a_fs->block_size;
+
+	// The caching used for logical file blocks is simpler than
+	// the version for images in img_io.c because we will always store complete 
+	// blocks - the block size for logical files is set to the same size as 
+	// the image cache. So each block in the cache will correspond to a
+	// file inum and block number.
+
+	// cache_lock is used for both the cache in IMG_INFO and
+	// the shared variables in the img type specific INFO structs.
+	// Grab it now so that it is held before any reads.
+	IMG_LOGICAL_INFO* logical_img_info = (IMG_LOGICAL_INFO*)a_fs->img_info;
+	TSK_IMG_INFO* img_info = a_fs->img_info;
+	LOGICALFS_INFO *logical_fs_info = (LOGICALFS_INFO*)a_fs;
+	tsk_take_lock(&(img_info->cache_lock));
+
+	// Check if this block is in the cache
+	int cache_next = 0;         // index to lowest age cache (to use next)
+	bool match_found = false;
+	for (int cache_index = 0; cache_index < TSK_IMG_INFO_CACHE_NUM; cache_index++) {
+
+		// Look into the in-use cache entries
+		if (img_info->cache_len[cache_index] > 0) {
+			if ((logical_img_info->cache_inum[cache_index] == a_fs_file->meta->addr)
+				// check if non-negative and cast to uint to avoid signed/unsigned comparison warning
+				&& (img_info->cache_off[cache_index] >= 0 && (TSK_DADDR_T)img_info->cache_off[cache_index] == a_block_num)) {
+				// We found it
+				memcpy(buf, img_info->cache[cache_index], block_size);
+				match_found = true;
+
+				// reset its "age" since it was useful
+				img_info->cache_age[cache_index] = LOGICAL_IMG_CACHE_AGE;
+
+				// we don't break out of the loop so that we update all ages
+			}
+			else {
+				// Decrease its "age" since it was not useful.
+				// We don't let used ones go below 1 so that they are not
+				// confused with entries that have never been used.
+				if (img_info->cache_age[cache_index] > 2) {
+					img_info->cache_age[cache_index]--;
+				}
+
+				// See if this is the most eligible replacement
+				if ((img_info->cache_len[cache_next] > 0)
+					&& (img_info->cache_age[cache_index] <
+						img_info->cache_age[cache_next])) {
+					cache_next = cache_index;
+				}
+			}
+		}
+	}
+
+	// If we found the block in the cache, we're done
+	if (match_found) {
+		tsk_release_lock(&(img_info->cache_lock));
+		return block_size;
+	}
+
+	// See if this file is already open
+	LOGICAL_FILE_HANDLE_CACHE* file_handle_entry = NULL;
+	for (int i = 0; i < LOGICAL_FILE_HANDLE_CACHE_LEN; i++) {
+		if (logical_img_info->file_handle_cache[i].inum == a_fs_file->meta->addr) {
+			// File is already open
+			file_handle_entry = &(logical_img_info->file_handle_cache[i]);
+		}
+	}
+
+	// If we didn't find it, open the file and save to the cache
+	if (file_handle_entry == NULL) {
+		// Load the path
+		TSK_TCHAR* path = load_path_from_inum(logical_fs_info, a_fs_file->meta->addr);
+		if (path == NULL) {
+			tsk_release_lock(&(img_info->cache_lock));
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("logical_fs_read_block: Error loading path for file addr %" PRIuINUM, a_fs_file->meta->addr);
+			return -1;
+		}
+
+#ifdef TSK_WIN32
+		// Open the file
+		HANDLE fd;
+		if (TSTRLEN(path) < MAX_PATH) {
+			fd = CreateFileW(path, FILE_READ_DATA,
+				FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0,
+				NULL);
+		}
+		else {
+			// Long paths need the \\?\ prefix and an absolute path
+			TCHAR absPath[LOGICAL_MAX_PATH_UNICODE + 4];
+			TSTRNCPY(absPath, L"\\\\?\\", 4);
+			int absPathLen = GetFullPathNameW(path, LOGICAL_MAX_PATH_UNICODE, &(absPath[4]), NULL);
+			if (absPathLen <= 0) {
+				// Release the lock and set the error string (which uses
+				// path) before freeing path. Return -1 since this method
+				// returns a byte count, not a TSK_RETVAL_ENUM.
+				tsk_release_lock(&(img_info->cache_lock));
+				tsk_error_reset();
+				tsk_error_set_errno(TSK_ERR_FS_GENFS);
+				tsk_error_set_errstr("logicalfs_read_block: Error looking up contents of directory (path too long) %" PRIttocTSK, path);
+				free(path);
+				return -1;
+			}
+			fd = CreateFileW(absPath, FILE_READ_DATA,
+				FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0,
+				NULL);
+		}
+		if (fd == INVALID_HANDLE_VALUE) {
+			tsk_release_lock(&(img_info->cache_lock));
+			int lastError = (int)GetLastError();
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_READ);
+			tsk_error_set_errstr("logical_fs_read_block: file \"%" PRIttocTSK
+				"\" - %d", path, lastError);
+			free(path);
+			return -1;
+		}
+#else
+		int fd = 0;
+#endif
+		// The on-disk path was only needed to open the handle
+		free(path);
+
+		// Set up this cache entry
+		file_handle_entry = &(logical_img_info->file_handle_cache[logical_img_info->next_file_handle_cache_slot]);
+		if (file_handle_entry->fd != 0) {
+			// Close the current file handle
+#ifdef TSK_WIN32
+			CloseHandle(file_handle_entry->fd);
+#endif
+		}
+		file_handle_entry->fd = fd;
+		file_handle_entry->inum = a_fs_file->meta->addr;
+		file_handle_entry->seek_pos = 0;
+
+		// Set up the next cache entry to use (simple round-robin)
+		logical_img_info->next_file_handle_cache_slot++;
+		if (logical_img_info->next_file_handle_cache_slot >= LOGICAL_FILE_HANDLE_CACHE_LEN) {
+			logical_img_info->next_file_handle_cache_slot = 0;
+		}
+	}
+
+	// Seek to the starting offset (if necessary)
+	TSK_OFF_T offset_to_read = a_block_num * block_size;
+	if (offset_to_read != file_handle_entry->seek_pos) {
+#ifdef TSK_WIN32
+		LARGE_INTEGER li;
+		li.QuadPart = offset_to_read;
+
+		li.LowPart = SetFilePointer(file_handle_entry->fd, li.LowPart,
+			&li.HighPart, FILE_BEGIN);
+
+		if ((li.LowPart == INVALID_SET_FILE_POINTER) &&
+			(GetLastError() != NO_ERROR)) {
+
+			tsk_release_lock(&(img_info->cache_lock));
+			int lastError = (int)GetLastError();
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_IMG_SEEK);
+			tsk_error_set_errstr("logical_fs_read_block: file addr %" PRIuINUM
+				" offset %" PRIdOFF " seek - %d",
+				a_fs_file->meta->addr, a_block_num, lastError);
+			return -1;
+		}
+#endif
+		file_handle_entry->seek_pos = offset_to_read;
+	}
+
+	// Read the data
+	unsigned int len_to_read;
+	if (((a_block_num + 1) * block_size) <= (unsigned long long)a_fs_file->meta->size) {
+		// If the file is large enough to read the entire block, then try to do so
+		len_to_read = block_size;
+	}
+	else {
+		// Otherwise, we expect to only be able to read a smaller number of bytes,
+		// so zero out the block first to give the null padding
+		len_to_read = a_fs_file->meta->size % block_size;
+		memset(buf, 0, block_size);
+	}
+
+#ifdef TSK_WIN32
+	DWORD nread;
+	if (FALSE == ReadFile(file_handle_entry->fd, buf, (DWORD)len_to_read, &nread, NULL)) {
+		tsk_release_lock(&(img_info->cache_lock));
+		int lastError = GetLastError();
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_READ);
+		tsk_error_set_errstr("logicalfs_read_block: file addr %" PRIuINUM
+			" offset: %" PRIu64 " read len: %" PRIuSIZE " - %d",
+			a_fs_file->meta->addr, a_block_num, (size_t)block_size,
+			lastError);
+		return -1;
+	}
+	file_handle_entry->seek_pos += nread;
+#else
+	// otherwise, not used; ensure used to prevent warning
+	(void)len_to_read;
+#endif
+
+	// Copy the block into the cache
+	memcpy(img_info->cache[cache_next], buf, block_size);
+	img_info->cache_len[cache_next] = block_size;
+	img_info->cache_age[cache_next] = LOGICAL_IMG_CACHE_AGE;
+	img_info->cache_off[cache_next] = a_block_num;
+	logical_img_info->cache_inum[cache_next] = a_fs_file->meta->addr;
+
+	tsk_release_lock(&(img_info->cache_lock));
+
+	// If we didn't read the expected number of bytes, return an error
+#ifdef TSK_WIN32
+	if (nread != len_to_read) {
+		int lastError = GetLastError();
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_READ);
+		tsk_error_set_errstr("logicalfs_read_block: file addr %" PRIuINUM
+			" offset: %" PRIdOFF " read len: %" PRIuSIZE " - %d",
+			a_fs_file->meta->addr, a_block_num, (size_t)block_size,
+			lastError);
+		return -1;
+	}
+#endif
+
+	return block_size;
+}
+
+/*
+* Reads data from a logical file.
+*
+* @param a_fs         File system
+* @param a_fs_file    File being read
+* @param a_offset     Starting offset
+* @param a_len        Length to read
+* @param a_buf        Holds bytes read from the file (should have length at least a_len)
+*
+* @return Number of bytes read or -1 on error.
+*/
+ssize_t 
+logicalfs_read(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_DADDR_T a_offset, size_t a_len, char *a_buf) {
+
+	TSK_DADDR_T current_block_num = a_offset / a_fs->block_size;
+	char block_buffer[LOGICAL_BLOCK_SIZE];
+	ssize_t cnt;                 // result of each block read (-1 on error)
+	char *dest = a_buf;
+	size_t bytes_left = a_len;   // bytes of real file data still to copy
+	size_t bytes_read = 0;
+	size_t filler_len = 0;       // bytes of null padding past end-of-file
+
+	if ((a_fs == NULL) || (a_fs_file == NULL) || (a_fs_file->meta == NULL)) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_read: Called with null arguments");
+		return -1;
+	}
+
+	if (a_offset >= (TSK_DADDR_T)a_fs_file->meta->size) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		tsk_error_set_errstr("logicalfs_read: Attempted to read offset beyond end of file (file addr: %" 
+			PRIuINUM ", file size: %" PRIdOFF ", offset: %" PRIuDADDR ")", a_fs_file->meta->addr, a_fs_file->meta->size, a_offset);
+		return -1;
+	}
+
+	// Only attempt to read to the end of the file at most
+	if (a_offset + a_len > (TSK_DADDR_T)a_fs_file->meta->size) {
+		bytes_left = a_fs_file->meta->size - a_offset;
+		filler_len = a_offset + a_len - a_fs_file->meta->size;
+
+		// Fill in the end of the buffer with null bytes
+		if (filler_len > 0) {
+			memset(dest + bytes_left, 0, filler_len);
+		}
+	}
+
+	// Read bytes prior to the first block boundary
+	if (a_offset % a_fs->block_size != 0) {
+		// Read in the smaller of the requested length and the bytes at the end of the block
+		size_t len_to_read = a_fs->block_size - (a_offset % a_fs->block_size);
+		if (len_to_read > bytes_left) {
+			len_to_read = bytes_left;
+		}
+		cnt = logicalfs_read_block(a_fs, a_fs_file, current_block_num, block_buffer);
+		if (cnt != (ssize_t)a_fs->block_size) {
+			// Error already set by logicalfs_read_block
+			return -1;
+		}
+		memcpy(dest, block_buffer + (a_offset % a_fs->block_size), len_to_read);
+		dest += len_to_read;
+		bytes_read += len_to_read;
+		bytes_left -= len_to_read;
+		current_block_num++;
+	}
+	// Check if we're done
+	if (bytes_left == 0) {
+		return (ssize_t)bytes_read;
+	}
+
+	// Read complete blocks directly into the output buffer
+	while (bytes_left >= a_fs->block_size) {
+		cnt = logicalfs_read_block(a_fs, a_fs_file, current_block_num, dest);
+		if (cnt != (ssize_t)a_fs->block_size) {
+			// Error already set by logicalfs_read_block
+			return -1;
+		}
+		dest += a_fs->block_size;
+		bytes_read += a_fs->block_size;
+		bytes_left -= a_fs->block_size;
+		current_block_num++;
+	}
+
+	// Check if we're done
+	if (bytes_left == 0) {
+		return (ssize_t)bytes_read;
+	}
+
+	// Read the final, incomplete block through the temp buffer
+	cnt = logicalfs_read_block(a_fs, a_fs_file, current_block_num, block_buffer);
+	if (cnt != (ssize_t)a_fs->block_size) {
+		// Error already set by logicalfs_read_block
+		return -1;
+	}
+	memcpy(dest, block_buffer, bytes_left);
+	dest += bytes_left;
+	bytes_read += bytes_left;
+
+	return (ssize_t)bytes_read;
+}
+
+/**
+* Print details about the file system to a file handle.
+*
+* @param fs File system to print details on
+* @param hFile File handle to print text to
+*
+* @returns 1 on error and 0 on success
+*/
+static uint8_t
+logicalfs_fsstat(TSK_FS_INFO * fs, FILE * hFile)
+{
+	const LOGICALFS_INFO *logical_fs = (const LOGICALFS_INFO *) fs;
+
+	// Generic header followed by the one logical-FS-specific item:
+	// the base directory this "file system" maps to.
+	tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n");
+	tsk_fprintf(hFile, "--------------------------------------------\n");
+	tsk_fprintf(hFile, "File System Type: Logical Directory\n");
+	tsk_fprintf(hFile,
+		"Base Directory Path: %" PRIttocTSK "\n",
+		logical_fs->base_path);
+	return 0;
+}
+
+/* logicalfs_fscheck - consistency checking is not supported for logical
+ * file systems; always sets TSK_ERR_FS_UNSUPFUNC and returns 1 (error). */
+static uint8_t
+logicalfs_fscheck(TSK_FS_INFO * /*fs*/, FILE * /*hFile*/)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("fscheck not supported for logical file systems");
+	return 1;
+}
+
+/**
+* Print details on a specific file to a file handle.
+*
+* Not supported for logical file systems - always sets an error and
+* returns 1.
+*
+* @param fs File system file is located in
+* @param flags istat flags
+* @param hFile File handle to print text to
+* @param inum Address of file in file system
+* @param numblock The number of blocks in file to force print (can go beyond file size)
+* @param sec_skew Clock skew in seconds to also print times in
+*
+* @returns 1 always (unsupported)
+*/
+static uint8_t
+logicalfs_istat(TSK_FS_INFO * /*fs*/, TSK_FS_ISTAT_FLAG_ENUM /*flags*/, FILE * /*hFile*/, TSK_INUM_T /*inum*/,
+	TSK_DADDR_T /*numblock*/, int32_t /*sec_skew*/)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("istat not supported for logical file systems");
+	return 1;
+}
+
+/* logicalfs_close - close a logical file system.
+ * Clears the tag so stale pointers can be detected, then frees the struct. */
+static void
+logicalfs_close(TSK_FS_INFO *fs)
+{
+	if (fs == NULL) {
+		return;
+	}
+	fs->tag = 0;
+	tsk_fs_free(fs);
+}
+
+/* logicalfs_jentry_walk - journal entry walking is not implemented for the
+ * logical file system; always sets TSK_ERR_FS_UNSUPFUNC and returns 1. */
+static uint8_t
+logicalfs_jentry_walk(TSK_FS_INFO * /*info*/, int /*entry*/,
+	TSK_FS_JENTRY_WALK_CB /*cb*/, void * /*fn*/)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("Journal support for logical directory is not implemented");
+	return 1;
+}
+
+/* logicalfs_jblk_walk - journal block walking is not implemented for the
+ * logical file system; always sets TSK_ERR_FS_UNSUPFUNC and returns 1. */
+static uint8_t
+logicalfs_jblk_walk(TSK_FS_INFO * /*info*/, TSK_DADDR_T /*daddr*/,
+	TSK_DADDR_T /*daddrt*/, int /*entry*/, TSK_FS_JBLK_WALK_CB /*cb*/,
+	void * /*fn*/)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("Journal support for logical directory is not implemented");
+	return 1;
+}
+
+/* logicalfs_jopen - opening a journal is not implemented for the logical
+ * file system; always sets TSK_ERR_FS_UNSUPFUNC and returns 1. */
+static uint8_t
+logicalfs_jopen(TSK_FS_INFO * /*info*/, TSK_INUM_T /*inum*/)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
+	tsk_error_set_errstr("Journal support for logical directory is not implemented");
+	return 1;
+}
+
+/* Compare two file names for the logical file system.
+ * On Windows a case-insensitive comparison is used (the underlying file
+ * system is presumably case-insensitive there - confirm); elsewhere the
+ * generic UNIX name comparison is delegated to.
+ * Returns 0 when the names match, non-zero otherwise (strcmp-style). */
+int
+logicalfs_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2)
+{
+#ifdef TSK_WIN32
+	return strcasecmp(s1, s2);
+#else
+	return tsk_fs_unix_name_cmp(a_fs_info, s1, s2);
+#endif
+}
+
+/**
+* Open the "logical" file system for a logical directory image.
+* Only enabled on Windows builds; img_info must be of type
+* TSK_IMG_TYPE_LOGICAL.
+*
+* @param img_info Image to open the file system from
+*
+* @return Pointer to the new TSK_FS_INFO or NULL on error
+*/
+TSK_FS_INFO *
+logical_fs_open(TSK_IMG_INFO * img_info) {
+
+	LOGICALFS_INFO *logical_fs_info = NULL;
+	TSK_FS_INFO *fs = NULL;
+	IMG_LOGICAL_INFO *logical_img_info = NULL;
+
+#ifndef TSK_WIN32
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_FS_ARG);
+	tsk_error_set_errstr("logical_fs_open: logical file systems currently only enabled on Windows");
+	return NULL;
+#endif
+
+	if (img_info->itype != TSK_IMG_TYPE_LOGICAL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_ARG);
+		// Error message matches the type actually being checked
+		tsk_error_set_errstr("logical_fs_open: image must be of type TSK_IMG_TYPE_LOGICAL");
+		return NULL;
+	}
+	logical_img_info = (IMG_LOGICAL_INFO *)img_info;
+
+	if ((logical_fs_info = (LOGICALFS_INFO *)tsk_fs_malloc(sizeof(LOGICALFS_INFO))) == NULL)
+		return NULL;
+
+	fs = &(logical_fs_info->fs_info);
+	logical_fs_info->base_path = logical_img_info->base_path; // To avoid having to always go through TSK_IMG_INFO
+
+	fs->tag = TSK_FS_INFO_TAG;
+	fs->ftype = TSK_FS_TYPE_LOGICAL;
+	fs->flags = (TSK_FS_INFO_FLAG_ENUM)0;
+	fs->img_info = img_info;
+	fs->offset = 0;
+	fs->endian = TSK_LIT_ENDIAN;
+	fs->duname = "None";
+
+	// Metadata info
+	fs->last_inum = 0; // Will set at the end
+	fs->root_inum = LOGICAL_ROOT_INUM;
+	fs->first_inum = LOGICAL_ROOT_INUM;
+	fs->inum_count = 0;
+
+	// Block info
+	fs->dev_bsize = 0;
+	fs->block_size = LOGICAL_BLOCK_SIZE;
+	fs->block_pre_size = 0;
+	fs->block_post_size = 0;
+	fs->block_count = 0;
+	fs->first_block = 0;
+	fs->last_block = INT64_MAX;
+	fs->last_block_act = INT64_MAX;
+
+	// Set the generic function pointers. Most will be no-ops for now.
+	fs->inode_walk = logicalfs_inode_walk;
+	fs->block_walk = logicalfs_block_walk;
+	fs->block_getflags = logicalfs_block_getflags;
+
+	fs->get_default_attr_type = logicalfs_get_default_attr_type;
+	fs->load_attrs = logicalfs_load_attrs;
+
+	fs->file_add_meta = logicalfs_file_add_meta;
+	fs->dir_open_meta = logicalfs_dir_open_meta;
+	fs->fsstat = logicalfs_fsstat;
+	fs->fscheck = logicalfs_fscheck;
+	fs->istat = logicalfs_istat;
+	fs->name_cmp = logicalfs_name_cmp;
+
+	fs->close = logicalfs_close;
+
+	// Journal functions - also no-ops.
+	fs->jblk_walk = logicalfs_jblk_walk;
+	fs->jentry_walk = logicalfs_jentry_walk;
+	fs->jopen = logicalfs_jopen;
+
+	// Calculate the last inum
+	fs->last_inum = find_max_inum(logical_fs_info);
+
+	// We don't really care about the last inum, but if traversing the 
+	// folders to calculate it fails then we're going to encounter
+	// the same error when using the logical file system.
+	if (fs->last_inum == LOGICAL_INVALID_INUM) {
+		logicalfs_close(fs);
+		return NULL;
+	}
+
+	return fs;
+}
diff --git a/tsk/fs/nofs_misc.c b/tsk/fs/nofs_misc.c
index 0829d1a7dd879e5a5ce0538892510fd3c4151f10..5e24dc59183488e1ef43323fbe959782d76c381a 100644
--- a/tsk/fs/nofs_misc.c
+++ b/tsk/fs/nofs_misc.c
@@ -215,7 +215,7 @@ tsk_fs_nofs_istat(TSK_FS_INFO * a_fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE *
  */
 TSK_RETVAL_ENUM
 tsk_fs_nofs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     tsk_error_reset();
     tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC);
diff --git a/tsk/fs/ntfs.c b/tsk/fs/ntfs.c
old mode 100755
new mode 100644
index 1761856acb6dd37498da081a945f67e0ecfd7583..fe720fd5cb71a34246e80d91fa0a99eae38ebf10
--- a/tsk/fs/ntfs.c
+++ b/tsk/fs/ntfs.c
@@ -379,6 +379,9 @@ ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum)
     uint16_t upd_off = tsk_getu16(fs->endian, mft->upd_off);
 
     // Make sure upd_cnt > 0 to prevent an integer wrap around.
+    // NOTE: There is a bug here because upd_cnt can be for unused entries.
+    // They are now skipped (as of July 2021). We shoudl refactor this code
+    // to allow upd_cnt = 0. 
     if ((upd_cnt == 0) || (upd_cnt > (((a_ntfs->mft_rsize_b) / 2) + 1))) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
@@ -571,6 +574,7 @@ is_clustalloc(NTFS_INFO * ntfs, TSK_DADDR_T addr)
  * @param ntfs File system that attribute is located in.
  * @param start_vcn The starting VCN for this run.
  * @param runlist The raw runlist data from the MFT entry.
+ * @param runlist_size The size of the raw runlist data from the MFT entry.
  * @param a_data_run_head [out] Pointer to pointer of run that is created. (NULL on error and for $BadClust - special case because it is a sparse file for the entire FS).
  * @param totlen [out] Pointer to location where total length of run (in bytes) can be returned (or NULL)
  * @param mnum MFT entry address
@@ -579,7 +583,7 @@ is_clustalloc(NTFS_INFO * ntfs, TSK_DADDR_T addr)
  */
 static TSK_RETVAL_ENUM
 ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn,
-    ntfs_runlist * runlist_head, TSK_FS_ATTR_RUN ** a_data_run_head,
+    ntfs_runlist * runlist_head, uint32_t runlist_size, TSK_FS_ATTR_RUN ** a_data_run_head,
     TSK_OFF_T * totlen, TSK_INUM_T mnum)
 {
     TSK_FS_INFO *fs = (TSK_FS_INFO *) ntfs;
@@ -588,6 +592,7 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn,
     unsigned int i, idx;
     TSK_DADDR_T prev_addr = 0;
     TSK_OFF_T file_offset = start_vcn;
+    uint32_t runlist_offset = 0;
 
     run = runlist_head;
     *a_data_run_head = NULL;
@@ -596,11 +601,15 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn,
     if (totlen)
         *totlen = 0;
 
+    if (runlist_size < 1) {
+        return TSK_ERR;
+    }
+
     /* Cycle through each run in the runlist
      * We go until we find an entry with no length
      * An entry with offset of 0 is for a sparse run
      */
-    while (NTFS_RUNL_LENSZ(run) != 0) {
+    while ((runlist_offset < runlist_size) && NTFS_RUNL_LENSZ(run) != 0) {
         int64_t addr_offset = 0;
 
         /* allocate a new tsk_fs_attr_run */
@@ -627,7 +636,7 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn,
          * A length of more than eight bytes will not fit in the
          * 64-bit length field (and is likely corrupt)
          */
-        if (NTFS_RUNL_LENSZ(run) > 8) {
+        if (NTFS_RUNL_LENSZ(run) > 8 || NTFS_RUNL_LENSZ(run) > runlist_size - runlist_offset - 1) {
             tsk_error_reset();
             tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
             tsk_error_set_errstr
@@ -755,8 +764,14 @@ ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn,
         }
 
         /* Advance run */
-        run = (ntfs_runlist *) ((uintptr_t) run + (1 + NTFS_RUNL_LENSZ(run)
-                + NTFS_RUNL_OFFSZ(run)));
+        uint32_t run_size = 1 + NTFS_RUNL_LENSZ(run) + NTFS_RUNL_OFFSZ(run);
+        run = (ntfs_runlist *) ((uintptr_t) run + run_size);
+
+        // Abritrary limit runlist_offset at INT32_MAX ((1 << 31) - 1)
+        if (run_size > (((uint32_t) 1UL << 31 ) -1) - runlist_offset) {
+            return TSK_ERR;
+        }
+        runlist_offset += run_size;
     }
 
     /* special case for $BADCLUST, which is a sparse file whose size is
@@ -851,7 +866,16 @@ static int
 ntfs_uncompress_setup(TSK_FS_INFO * fs, NTFS_COMP_INFO * comp,
     uint32_t compunit_size_c)
 {
+    if (fs->block_size == 0 || compunit_size_c == 0) {
+        return 1;
+    }
     comp->buf_size_b = fs->block_size * compunit_size_c;
+
+    // Detect an integer overflow e.g. 65536 * 65536
+    if (comp->buf_size_b < fs->block_size) {
+        return 1;
+    }
+
     if ((comp->uncomp_buf = tsk_malloc(comp->buf_size_b)) == NULL) {
         comp->buf_size_b = 0;
         return 1;
@@ -891,6 +915,7 @@ static uint8_t
 ntfs_uncompress_compunit(NTFS_COMP_INFO * comp)
 {
     size_t cl_index;
+    uint8_t recover_data = 0;
 
     tsk_error_reset();
 
@@ -931,14 +956,15 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp)
 
         blk_end = cl_index + blk_size;
         if (blk_end > comp->comp_len) {
-            tsk_error_set_errno(TSK_ERR_FS_FWALK);
-            tsk_error_set_errstr
-                ("ntfs_uncompress_compunit: Compression block length longer than buffer length: %"
-                PRIuSIZE "", blk_end);
-            return 1;
+            blk_end = comp->comp_len - 1;
+            if (tsk_verbose)
+                tsk_fprintf(stderr,
+                    "WARNING: ntfs_uncompress_compunit: Compression block length longer than buffer length. Attempting to continue.\n");
+            recover_data = 1;
+           // return 0; // zero out the entire block
+           // if we don't return 0, let the function continue to display as much decompressed data as possible
         }
 
-
         /* The MSB identifies if the block is compressed */
         iscomp = ((sb_header & 0x8000) != 0);
 
@@ -1117,7 +1143,10 @@ ntfs_uncompress_compunit(NTFS_COMP_INFO * comp)
             }
         }
     }                           // end of loop inside of compression unit
-
+    // if we are attempting to recover, we may not have decompressed an entire CU. Set uncomp_idx to the expected size.
+    if (recover_data) {
+        comp->uncomp_idx = comp->buf_size_b;
+    }
     return 0;
 }
 
@@ -1214,6 +1243,14 @@ ntfs_proc_compunit(NTFS_INFO * ntfs, NTFS_COMP_INFO * comp,
         for (a = 0; a < comp_unit_size; a++) {
             ssize_t cnt;
 
+            // Prevent an OOB write of comp->uncomp_buf
+            if ((comp->uncomp_idx >= comp->buf_size_b) || (fs->block_size > comp->buf_size_b - comp->uncomp_idx)) {
+                tsk_error_reset();
+                tsk_error_set_errno(TSK_ERR_FS_READ);
+                tsk_error_set_errstr("ntfs_proc_compunit: Buffer not big enough for uncompressed data (Index: %"PRIuSIZE ")", comp->uncomp_idx);
+                return 1;
+            }
+
             cnt =
                 tsk_fs_read_block(fs, comp_unit[a],
                 &comp->uncomp_buf[comp->uncomp_idx], fs->block_size);
@@ -1279,6 +1316,8 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr,
         TSK_OFF_T off = 0;
         int retval;
         uint8_t stop_loop = 0;
+        uint8_t init_size_reached = 0;
+        uint8_t has_init_size = 0;
 
         if (fs_attr->nrd.compsize <= 0) {
             tsk_error_set_errno(TSK_ERR_FS_FWALK);
@@ -1301,6 +1340,9 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr,
             return 1;
         }
         retval = TSK_WALK_CONT;
+        
+        if (fs_attr->nrd.initsize != fs_attr->fs_file->meta->size)
+            has_init_size = 1;
 
         /* cycle through the number of runs we have */
         for (fs_attr_run = fs_attr->nrd.run; fs_attr_run;
@@ -1398,19 +1440,36 @@ ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr,
                         tsk_fprintf(stderr,
                             "ntfs_proc_compunit: Decompressing at file offset %"PRIdOFF"\n", off);
 
-                    // decompress the unit
-                    if (ntfs_proc_compunit(ntfs, &comp, comp_unit,
+                    // decompress the unit if we have not passed initsize yet.
+                    if (!init_size_reached) {
+                        if (ntfs_proc_compunit(ntfs, &comp, comp_unit,
                             comp_unit_idx)) {
-                        tsk_error_set_errstr2("%" PRIuINUM " - type: %"
-                            PRIu32 "  id: %d Status: %s",
-                            fs_attr->fs_file->meta->addr, fs_attr->type,
-                            fs_attr->id,
-                            (fs_attr->fs_file->meta->
-                                flags & TSK_FS_META_FLAG_ALLOC) ?
-                            "Allocated" : "Deleted");
-                        free(comp_unit);
-                        ntfs_uncompress_done(&comp);
-                        return 1;
+                            tsk_error_set_errstr2("%" PRIuINUM " - type: %"
+                                PRIu32 "  id: %d Status: %s",
+                                fs_attr->fs_file->meta->addr, fs_attr->type,
+                                fs_attr->id,
+                                (fs_attr->fs_file->meta->
+                                    flags & TSK_FS_META_FLAG_ALLOC) ?
+                                "Allocated" : "Deleted");
+                            free(comp_unit);
+                            ntfs_uncompress_done(&comp);
+                            return 1;
+                        }
+
+                        /* if we've passed the initialized size while reading this block, 
+                         * zero out the buffer beyond the initialized size. */
+                        if (has_init_size && (off < fs_attr->nrd.initsize)) {
+                            const int64_t prev_remanining_init_size = fs_attr->nrd.initsize - off;
+                            if (prev_remanining_init_size < (int64_t)comp.buf_size_b) {
+                                memset(&comp.uncomp_buf[prev_remanining_init_size], 0, comp.buf_size_b - prev_remanining_init_size);
+                                init_size_reached = 1;
+                            }
+                        }
+                    }
+                    // set the buffers to 0s if we are past initsize
+                    else {
+                        ntfs_uncompress_reset(&comp);
+                        comp.uncomp_idx = comp.buf_size_b;
                     }
 
                     // now call the callback with the uncompressed data
@@ -1549,6 +1608,8 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr,
         uint32_t comp_unit_idx = 0;
         NTFS_COMP_INFO comp;
         size_t buf_idx = 0;
+        uint8_t init_size_reached = 0;
+        uint8_t has_init_size = 0;
 
         if (a_fs_attr->nrd.compsize <= 0) {
             tsk_error_set_errno(TSK_ERR_FS_FWALK);
@@ -1583,6 +1644,9 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr,
             return len;
         }
 
+        if (a_fs_attr->nrd.initsize	!= a_fs_attr->fs_file->meta->size)
+            has_init_size = 1;
+
         /* Allocate the buffers and state structure */
         if (ntfs_uncompress_setup(fs, &comp, a_fs_attr->nrd.compsize)) {
             return -1;
@@ -1642,19 +1706,36 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr,
                         && (data_run_cur->next == NULL))) {
                     size_t cpylen;
 
-                    // decompress the unit
-                    if (ntfs_proc_compunit(ntfs, &comp, comp_unit,
+                    // decompress the unit if we are still in initsize
+                    if (!init_size_reached) {
+                        if (ntfs_proc_compunit(ntfs, &comp, comp_unit,
                             comp_unit_idx)) {
-                        tsk_error_set_errstr2("%" PRIuINUM " - type: %"
-                            PRIu32 "  id: %d  Status: %s",
-                            a_fs_attr->fs_file->meta->addr,
-                            a_fs_attr->type, a_fs_attr->id,
-                            (a_fs_attr->fs_file->meta->
-                                flags & TSK_FS_META_FLAG_ALLOC) ?
-                            "Allocated" : "Deleted");
-                        free(comp_unit);
-                        ntfs_uncompress_done(&comp);
-                        return -1;
+                            tsk_error_set_errstr2("%" PRIuINUM " - type: %"
+                                PRIu32 "  id: %d  Status: %s",
+                                a_fs_attr->fs_file->meta->addr,
+                                a_fs_attr->type, a_fs_attr->id,
+                                (a_fs_attr->fs_file->meta->
+                                    flags & TSK_FS_META_FLAG_ALLOC) ?
+                                "Allocated" : "Deleted");
+                            free(comp_unit);
+                            ntfs_uncompress_done(&comp);
+                            return -1;
+                        }
+
+                        /* if we've passed the initialized size while reading this block, 
+                         * zero out the buffer beyond the initialized size
+                         */
+                        if (has_init_size) {
+                            const int64_t remanining_init_size = a_fs_attr->nrd.initsize - buf_idx - a_offset;
+                            if (remanining_init_size < (int64_t)comp.buf_size_b) {
+                                memset(comp.uncomp_buf + remanining_init_size, 0, comp.buf_size_b - remanining_init_size);
+                                init_size_reached = 1;
+                            }
+                        }
+                    }
+                    else {
+                        ntfs_uncompress_reset(&comp);
+                        comp.uncomp_idx = comp.buf_size_b;
                     }
 
                     // copy uncompressed data to the output buffer
@@ -1685,6 +1766,7 @@ ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr,
                     byteoffset = 0;
                     buf_idx += cpylen;
                     comp_unit_idx = 0;
+
                 }
                 /* If it is a sparse run, don't increment the addr so that
                  * it remains 0 */
@@ -1978,19 +2060,23 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs,
                 return TSK_COR;
             }
 
+            uint32_t attr_len = tsk_getu32(fs->endian, attr->len);
+            uint64_t run_start_vcn = tsk_getu64(fs->endian, attr->c.nr.start_vcn);
+            uint16_t run_off = tsk_getu16(fs->endian, attr->c.nr.run_off);
+
             // sanity check
-            if (tsk_getu16(fs->endian, attr->c.nr.run_off) > tsk_getu32(fs->endian, attr->len)) {
+            if ((run_off < 48) || (run_off >= attr_len)) {
                 if (tsk_verbose)
-                    tsk_fprintf(stderr, "ntfs_proc_attrseq: run offset too big\n");
+                    tsk_fprintf(stderr, "ntfs_proc_attrseq: run offset out of bounds\n");
                 break;
             }
 
             /* convert the run to generic form */
             retval = ntfs_make_data_run(ntfs,
-                tsk_getu64(fs->endian, attr->c.nr.start_vcn),
-                (ntfs_runlist *) ((uintptr_t)
-                    attr + tsk_getu16(fs->endian,
-                        attr->c.nr.run_off)), &fs_attr_run, NULL,
+                run_start_vcn,
+                (ntfs_runlist *) ((uintptr_t) attr + run_off),
+                attr_len - run_off,
+                &fs_attr_run, NULL,
                 a_attrinum);
             if (retval != TSK_OK) {
                 tsk_error_errstr2_concat(" - proc_attrseq");
@@ -2248,10 +2334,9 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs,
 
         /* File Name (always resident) */
         else if (type == NTFS_ATYPE_FNAME) {
-            ntfs_attr_fname *fname;
-            TSK_FS_META_NAME_LIST *fs_name;
-            UTF16 *name16;
-            UTF8 *name8;
+            uint32_t attr_len = tsk_getu32(fs->endian, attr->len);
+            uint16_t attr_off = tsk_getu16(fs->endian, attr->c.r.soff);
+
             if (attr->res != NTFS_MFT_RES) {
                 tsk_error_reset();
                 tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
@@ -2259,9 +2344,22 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs,
                     ("proc_attr_seq: File Name Attribute is not resident!");
                 return TSK_COR;
             }
-            fname =
-                (ntfs_attr_fname *) ((uintptr_t) attr +
-                tsk_getu16(fs->endian, attr->c.r.soff));
+            if ((attr_off < 16) || (attr_off >= attr_len)) {
+                tsk_error_reset();
+                tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+                tsk_error_set_errstr
+                    ("proc_attrseq: resident data offset of File Name Attribute is out of bounds!");
+                return TSK_COR;
+            }
+            // A File Name Attribute should be at least 66 bytes in size
+            if ((attr_len < 66) || (attr_off > attr_len - 66)) {
+                tsk_error_reset();
+                tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+                tsk_error_set_errstr
+                    ("proc_attrseq: resident data of File Name Attribute is too small!");
+                return TSK_COR;
+            }
+            ntfs_attr_fname *fname = (ntfs_attr_fname *) ((uintptr_t) attr + attr_off);
             if (fname->nspace == NTFS_FNAME_DOS) {
                 continue;
             }
@@ -2288,6 +2386,7 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs,
 
             fs_file->meta->time2.ntfs.fn_id = id;
 
+            TSK_FS_META_NAME_LIST *fs_name;
 
             /* Seek to the end of the fs_name structures in TSK_FS_META */
             if (fs_file->meta->name2) {
@@ -2314,9 +2413,16 @@ ntfs_proc_attrseq(NTFS_INFO * ntfs,
                 }
                 fs_name->next = NULL;
             }
+            if (fname->nlen > attr_len - 66) {
+                tsk_error_reset();
+                tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+                tsk_error_set_errstr
+                    ("proc_attrseq: invalid name value size out of bounds!");
+                return TSK_COR;
+            }
+            UTF16 *name16 = (UTF16 *) & fname->name;
+            UTF8 *name8 = (UTF8 *) fs_name->name;
 
-            name16 = (UTF16 *) & fname->name;
-            name8 = (UTF8 *) fs_name->name;
             retVal =
                 tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16,
                 (UTF16 *) ((uintptr_t) name16 +
@@ -3157,23 +3263,26 @@ ntfs_load_bmap(NTFS_INFO * ntfs)
         tsk_getu16(fs->endian, mft->attr_off));
     data_attr = NULL;
 
+    uint32_t attr_len = 0;
+    uint32_t attr_type = 0;
+
     /* cycle through them */
     while ((uintptr_t) attr + sizeof (ntfs_attr) <=
             ((uintptr_t) mft + (uintptr_t) ntfs->mft_rsize_b)) {
 
-        if ((tsk_getu32(fs->endian, attr->len) == 0) ||
-            (tsk_getu32(fs->endian, attr->type) == 0xffffffff)) {
+        attr_len = tsk_getu32(fs->endian, attr->len);
+        attr_type = tsk_getu32(fs->endian, attr->type);
+
+        if ((attr_len == 0) || (attr_type == 0xffffffff)) {
             break;
         }
 
-        if (tsk_getu32(fs->endian, attr->type) == NTFS_ATYPE_DATA) {
+        if (attr_type == NTFS_ATYPE_DATA) {
             data_attr = attr;
             break;
         }
 
-        attr =
-            (ntfs_attr *) ((uintptr_t) attr + tsk_getu32(fs->endian,
-                attr->len));
+        attr = (ntfs_attr *) ((uintptr_t) attr + attr_len);
     }
 
     /* did we get it? */
@@ -3183,18 +3292,28 @@ ntfs_load_bmap(NTFS_INFO * ntfs)
         tsk_error_set_errstr("Error Finding Bitmap Data Attribute");
         goto on_error;
     }
-    uint32_t attr_len = tsk_getu32(fs->endian, data_attr->len);
+    attr_len = tsk_getu32(fs->endian, data_attr->len);
     if (attr_len > ntfs->mft_rsize_b) {
         goto on_error;
     }
 
-    /* convert to generic form */
+    uint64_t run_start_vcn = tsk_getu64(fs->endian, data_attr->c.nr.start_vcn);
+    uint16_t run_off = tsk_getu16(fs->endian, data_attr->c.nr.run_off);
+
+    if ((run_off < 48) ||
+        (run_off >= attr_len) ||
+        ((uintptr_t) data_attr + run_off) > ((uintptr_t) mft + (uintptr_t) ntfs->mft_rsize_b)) {
+        tsk_error_reset();
+        tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+        tsk_error_set_errstr("Invalid run_off of Bitmap Data Attribute - value out of bounds");
+        goto on_error;
+    }
+    /* convert data run to generic form */
     if ((ntfs_make_data_run(ntfs,
-                tsk_getu64(fs->endian, data_attr->c.nr.start_vcn),
-                (ntfs_runlist
-                    *) ((uintptr_t) data_attr + tsk_getu16(fs->endian,
-                        data_attr->c.nr.run_off)), &(ntfs->bmap),
-                NULL, NTFS_MFT_BMAP)) != TSK_OK) {
+                run_start_vcn,
+                (ntfs_runlist *) ((uintptr_t) data_attr + run_off),
+                attr_len - run_off,
+                &(ntfs->bmap), NULL, NTFS_MFT_BMAP)) != TSK_OK) {
         goto on_error;
     }
     ntfs->bmap_buf = (char *) tsk_malloc(fs->block_size);
@@ -3424,7 +3543,6 @@ ntfs_get_sds(TSK_FS_INFO * fs, uint32_t secid)
     uint64_t sii_sds_file_off = 0;
     uint32_t sii_sds_ent_size = 0;
 
-
     if ((fs == NULL) || (secid == 0)) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_ARG);
@@ -3432,7 +3550,6 @@ ntfs_get_sds(TSK_FS_INFO * fs, uint32_t secid)
         return NULL;
     }
 
-
     // Loop through all the SII entries looking for the security id matching that found in the file.
     // This lookup is obviously O(n^2) for all n files. However, since so many files have the exact
     // same security identifier, it is not really that bad. In reality, 100,000 files may only map to
@@ -3440,79 +3557,68 @@ ntfs_get_sds(TSK_FS_INFO * fs, uint32_t secid)
     // increase incrementally, we could go directly to the entry in question ((secid * 0x28) + 256).
     // SII entries started at 256 on Vista; however, I did not look at the starting secid for other
     // versions of NTFS.
-    for (i = 0; i < ntfs->sii_data.used; i++) {
-        if (tsk_getu32(fs->endian,
-                ((ntfs_attr_sii *) (ntfs->sii_data.buffer))[i].
-                key_sec_id) == secid) {
-            sii = &((ntfs_attr_sii *) (ntfs->sii_data.buffer))[i];
-            break;
-        }
-    }
-
-    if (sii == NULL) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_FS_GENFS);
-        tsk_error_set_errstr("ntfs_get_sds: SII entry not found (%" PRIu32
-            ")", secid);
-        return NULL;
-    }
-
-    sii_secid = tsk_getu32(fs->endian, sii->key_sec_id);
-    sii_sechash = tsk_getu32(fs->endian, sii->data_hash_sec_desc);
-    sii_sds_file_off = tsk_getu64(fs->endian, sii->sec_desc_off);
-    sii_sds_ent_size = tsk_getu32(fs->endian, sii->sec_desc_size);
-
-    // Check that we do not go out of bounds.
-    if (sii_sds_file_off > ntfs->sds_data.size) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_FS_GENFS);
-        tsk_error_set_errstr("ntfs_get_sds: SII offset too large (%" PRIu64
-            ")", sii_sds_file_off);
-        return NULL;
-    }
-    else if (!sii_sds_ent_size) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_FS_GENFS);
-        tsk_error_set_errstr("ntfs_get_sds: SII entry size is invalid (%"
-            PRIu32 ")", sii_sds_ent_size);
-        return NULL;
-    }
-
-    sds =
-        (ntfs_attr_sds *) ((uint8_t *) ntfs->sds_data.buffer +
-        sii_sds_file_off);
-    sds_secid = tsk_getu32(fs->endian, sds->sec_id);
-    sds_sechash = tsk_getu32(fs->endian, sds->hash_sec_desc);
-    sds_file_off = tsk_getu64(fs->endian, sds->file_off);
-    //sds_ent_size = tsk_getu32(fs->endian, sds->ent_size);
-
-    // Sanity check to make sure the $SII entry points to
-    // the correct $SDS entry.
-    if ((sds_secid == sii_secid) &&
-        (sds_sechash == sii_sechash) && (sds_file_off == sii_sds_file_off)
-        //&& (sds_ent_size == sii_sds_ent_size)
-        ) {
-        return sds;
-    }
-    else {
-        if (tsk_verbose)
-            tsk_fprintf(stderr,
-                "ntfs_get_sds: entry found was for wrong Security ID (%"
-                PRIu32 " vs %" PRIu32 ")\n", sds_secid, sii_secid);
-
-//        if (sii_secid != 0) {
-
-        // There is obviously a mismatch between the information in the SII entry and that in the SDS entry.
-        // After looking at these mismatches, it appears there is not a pattern. Perhaps some entries have been reused.
-
-        //printf("\nsecid %d hash %x offset %I64x size %x\n", sii_secid, sii_sechash, sii_sds_file_off, sii_sds_ent_size);
-        //printf("secid %d hash %x offset %I64x size %x\n", sds_secid, sds_sechash, sds_file_off, sds_ent_size);
-        //      }
-    }
-
-    tsk_error_reset();
-    tsk_error_set_errno(TSK_ERR_FS_GENFS);
-    tsk_error_set_errstr("ntfs_get_sds: Got to end w/out data");
+	//
+	// It appears that the file format may have changed since this was first written. There now appear to
+	// be multiple entries for each security ID. Some may no longer be valid, so we loop over all of them
+	// until we find one that looks valid.
+	for (i = 0; i < ntfs->sii_data.used; i++) {
+		if (! (tsk_getu32(fs->endian,
+			((ntfs_attr_sii *)(ntfs->sii_data.buffer))[i].key_sec_id) == secid)) {
+			continue;
+		}
+
+		// We found a potentially good SII entry
+		sii = &((ntfs_attr_sii *)(ntfs->sii_data.buffer))[i];
+		sii_secid = tsk_getu32(fs->endian, sii->key_sec_id);
+		sii_sechash = tsk_getu32(fs->endian, sii->data_hash_sec_desc);
+		sii_sds_file_off = tsk_getu64(fs->endian, sii->sec_desc_off);
+		sii_sds_ent_size = tsk_getu32(fs->endian, sii->sec_desc_size);
+
+		// Check that we do not go out of bounds.
+		if (sii_sds_file_off > ntfs->sds_data.size) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("ntfs_get_sds: SII offset too large (%" PRIu64
+				")", sii_sds_file_off);
+			continue;
+		}
+		else if (!sii_sds_ent_size) {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_FS_GENFS);
+			tsk_error_set_errstr("ntfs_get_sds: SII entry size is invalid (%"
+				PRIu32 ")", sii_sds_ent_size);
+			continue;
+		}
+
+		sds =
+			(ntfs_attr_sds *)((uint8_t *)ntfs->sds_data.buffer +
+				sii_sds_file_off);
+		sds_secid = tsk_getu32(fs->endian, sds->sec_id);
+		sds_sechash = tsk_getu32(fs->endian, sds->hash_sec_desc);
+		sds_file_off = tsk_getu64(fs->endian, sds->file_off);
+
+		// Sanity check to make sure the $SII entry points to
+		// the correct $SDS entry.
+		if ((sds_secid == sii_secid) &&
+			(sds_sechash == sii_sechash) && (sds_file_off == sii_sds_file_off)
+			//&& (sds_ent_size == sii_sds_ent_size)
+			) {
+			// Clear any previous errors
+			tsk_error_reset();
+			return sds;
+		}
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_GENFS);
+		tsk_error_set_errstr("ntfs_get_sds: SII entry %" PRIu32 " not found");
+	}
+
+	// If we never even found an SII entry that matched our secid, update the error state.
+	// Otherwise leave it as the last error recorded.
+	if (sii == NULL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_FS_GENFS);
+		tsk_error_set_errstr("ntfs_get_sds: Got to end w/out data");
+	}
     return NULL;
 }
 #endif
@@ -3545,8 +3651,8 @@ ntfs_file_get_sidstr(TSK_FS_FILE * a_fs_file, char **sid_str)
     if (!a_fs_file->meta->attr) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_GENFS);
-        tsk_error_set_errstr
-            ("ntfs_file_get_sidstr: file argument has no meta data");
+		tsk_error_set_errstr
+		("ntfs_file_get_sidstr: file argument has no meta data");
         return 1;
     }
 
@@ -3615,7 +3721,7 @@ ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer)
     for (sii_buffer_offset = 0; sii_buffer_offset < sii_buffer->size;
         sii_buffer_offset += ntfs->idx_rsize_b) {
 
-        uintptr_t idx_buffer_end = 0;
+        uint8_t* idx_buffer_end = 0;
 
         ntfs_idxrec *idxrec =
             (ntfs_idxrec *) & sii_buffer->buffer[sii_buffer_offset];
@@ -3638,35 +3744,57 @@ ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer)
         }
 
         // get pointer to first record
-        sii =
-            (ntfs_attr_sii *) ((uintptr_t) & idxrec->list +
-            tsk_getu32(fs->endian, idxrec->list.begin_off));
+		uint8_t* sii_data_ptr = ((uint8_t*)& idxrec->list +
+			tsk_getu32(fs->endian, idxrec->list.begin_off));
 
         // where last record ends
-        idx_buffer_end = (uintptr_t) & idxrec->list +
+        idx_buffer_end = (uint8_t*) & idxrec->list +
             tsk_getu32(fs->endian, idxrec->list.bufend_off);
 
 
         // copy records into NTFS_INFO
-        while ((uintptr_t)sii + sizeof(ntfs_attr_sii) <= idx_buffer_end) {
-/*
-			if ((tsk_getu16(fs->endian,sii->size) == 0x14) &&
-				(tsk_getu16(fs->endian,sii->data_off) == 0x14) &&
-				(tsk_getu16(fs->endian,sii->ent_size) == 0x28)
-				)
-			{
-*/
-            /* make sure we don't go over bounds of ntfs->sii_data.buffer */
-            if ((ntfs->sii_data.used + 1) * sizeof(ntfs_attr_sii) > ntfs->sii_data.size) {
-                if (tsk_verbose)
-                    tsk_fprintf(stderr, "ntfs_proc_sii: data buffer too small\n");
-                return; // reached end of ntfs->sii_data.buffer
-            }
+		while (sii_data_ptr + sizeof(ntfs_attr_sii) <= idx_buffer_end) {
+			/* make sure we don't go over bounds of ntfs->sii_data.buffer */
+			if ((ntfs->sii_data.used + 1) * sizeof(ntfs_attr_sii) > ntfs->sii_data.size) {
+				if (tsk_verbose)
+					tsk_fprintf(stderr, "ntfs_proc_sii: data buffer too small\n");
+				return; // reached end of ntfs->sii_data.buffer
+			}
 
-            memcpy(ntfs->sii_data.buffer +
-                (ntfs->sii_data.used * sizeof(ntfs_attr_sii)), sii,
-                sizeof(ntfs_attr_sii));
-            ntfs->sii_data.used++;
+			// It appears that perhaps older versions of NTFS always had entries of length 0x28. Now it appears we also can
+			// have entries of length 0x30. And there are also some entries that take up 0x28 bytes but have their length set to 0x10.
+
+			// 1400140000000000280004000000000002110000f233505302110000a026320000000000ec000000  // Normal entry of length 0x28
+			// 0000000000000000100000000200000003110000a65c02000311000090273200000000005c010000  // Possibly deleted? entry of length 0x28 but reporting length 0x10
+			// 140014000000000030000400010000001d150000abb032671d150000805a3a0000000000e80000006800000000000000  // Entry of length 0x30. Unclear what the eight final bytes are
+			// 00000000000000001800000003001b00540000000000000067110000a0823200000000003c0100005400000000000000  // I think this is the possibly deleted form of a long entry
+			//
+			// I haven't been able to find any documentation of what's going on - it's all old and says the entry length will be 0x28. The flags 
+			// are also different across these three types but I also can't find any documentation on what they mean. So this is a best guess on 
+			// how we should handle things:
+			// - If the length field is 0x30 or the first two fields are null and the length is 0x18, save the entry and advance 0x30 bytes. 
+			//         The last eight bytes on the long entries will be ignored.
+			// - Otherwise save the entry and advance by 0x28 bytes.
+			//
+			sii = (ntfs_attr_sii*)sii_data_ptr;
+			int data_off = tsk_getu16(fs->endian, sii->data_off);
+			int data_size = tsk_getu16(fs->endian, sii->size);
+			int ent_size = tsk_getu16(fs->endian, sii->ent_size);
+
+			// Copy the entry. It seems like we could have a check here that the first two fields are 0x14
+			// but we don't know for sure that not having those indicates an invalid entry.
+			memcpy(ntfs->sii_data.buffer +
+				(ntfs->sii_data.used * sizeof(ntfs_attr_sii)), sii_data_ptr,
+				sizeof(ntfs_attr_sii));
+			ntfs->sii_data.used++;
+
+			// Advance the pointer
+			if (ent_size == 0x30 || (data_off == 0 && data_size == 0 && ent_size == 0x18)) {
+				sii_data_ptr += 0x30;
+			}
+			else {
+				sii_data_ptr += 0x28;
+			}
 
 /*
 				printf("Security id %d is at offset 0x%I64x for 0x%x bytes\n", tsk_getu32(fs->endian,sii->key_sec_id),
@@ -3683,7 +3811,6 @@ ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer)
 																		   tsk_getu32(fs->endian,sii->sec_desc_size));
 			}
 */
-            sii++;
         }
     }
 }
@@ -5040,6 +5167,41 @@ ntfs_close(TSK_FS_INFO * fs)
     tsk_fs_free(fs);
 }
 
+/**
+ * Check if the boot format matches that produced in KAPE VHDs
+ * that are missing the 0x55AA marker.
+ * Will also set the endianness.
+ *
+ * @param ntfs_info File system info
+ * @returns 0 if format appears valid, 1 otherwise
+ */
+static int
+process_kape_boot_format(NTFS_INFO* ntfs_info) {
+
+    // Check that we have a VHD
+    if (ntfs_info->fs_info.img_info->itype != TSK_IMG_TYPE_VHD_VHD) {
+        return 1;
+    }
+
+    // Check that expected name is present
+    if (strncmp(ntfs_info->fs->oemname, "NTFS    ", 8) != 0) {
+        return 1;
+    }
+
+    // Check endianness using the sector size
+    uint16_t ssize = tsk_getu16(TSK_LIT_ENDIAN, ntfs_info->fs->ssize);
+    if ((ssize != 0) && (ssize % 512 == 0)) {
+        ntfs_info->fs_info.endian = TSK_LIT_ENDIAN;
+        return 0;
+    }
+    ssize = tsk_getu16(TSK_BIG_ENDIAN, ntfs_info->fs->ssize);
+    if ((ssize != 0) && (ssize % 512 == 0)) {
+        ntfs_info->fs_info.endian = TSK_BIG_ENDIAN;
+        return 0;
+    }
+
+    return 1;
+}
 
 /**
  * Open part of a disk image as an NTFS file system.
@@ -5113,12 +5275,14 @@ ntfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset,
 
     /* Check the magic value */
     if (tsk_fs_guessu16(fs, ntfs->fs->magic, NTFS_FS_MAGIC)) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_FS_MAGIC);
-        tsk_error_set_errstr("Not a NTFS file system (magic)");
-        if (tsk_verbose)
-            fprintf(stderr, "ntfs_open: Incorrect NTFS magic\n");
-        goto on_error;
+        if (process_kape_boot_format(ntfs)) {
+            tsk_error_reset();
+            tsk_error_set_errno(TSK_ERR_FS_MAGIC);
+            tsk_error_set_errstr("Not a NTFS file system (magic)");
+            if (tsk_verbose)
+                fprintf(stderr, "ntfs_open: Incorrect NTFS magic\n");
+            goto on_error;
+        }
     }
 
 
diff --git a/tsk/fs/ntfs_dent.cpp b/tsk/fs/ntfs_dent.cpp
index 9070182fe3ec326f97ea47ed1405cbbde2f706a7..3bc7a78ffb551c1cd2e7955053e750187a863ba2 100644
--- a/tsk/fs/ntfs_dent.cpp
+++ b/tsk/fs/ntfs_dent.cpp
@@ -237,8 +237,11 @@ ntfs_parent_act(TSK_FS_FILE * fs_file, void * /*ptr*/)
 
 /****************/
 
+/**
+  * @returns 1 on error
+  */
 static uint8_t
-ntfs_dent_copy(NTFS_INFO * ntfs, ntfs_idxentry * idxe,
+ntfs_dent_copy(NTFS_INFO * ntfs, ntfs_idxentry * idxe, uintptr_t endaddr,
     TSK_FS_NAME * fs_name)
 {
     ntfs_attr_fname *fname = (ntfs_attr_fname *) & idxe->stream;
@@ -254,10 +257,18 @@ ntfs_dent_copy(NTFS_INFO * ntfs, ntfs_idxentry * idxe,
 
     name16 = (UTF16 *) & fname->name;
     name8 = (UTF8 *) fs_name->name;
+    
+    const UTF16 * sourceEnd = (UTF16 *) ((uintptr_t) name16 + fname->nlen * 2);
+    if (((uintptr_t) sourceEnd) >= endaddr) {
+        if (tsk_verbose)
+            tsk_fprintf(stderr,
+                "sourceEnd: %" PRIuINUM " is out of endaddr bounds: %" PRIuINUM,
+                sourceEnd, endaddr);
+        return 1;
+    }
 
     retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16,
-        (UTF16 *) ((uintptr_t) name16 +
-            fname->nlen * 2), &name8,
+        sourceEnd, &name8,
         (UTF8 *) ((uintptr_t) name8 +
             fs_name->name_size), TSKlenientConversion);
 
@@ -549,7 +560,7 @@ ntfs_proc_idxentry(NTFS_INFO * a_ntfs, TSK_FS_DIR * a_fs_dir,
         }
 
         /* Copy it into the generic form */
-        if (ntfs_dent_copy(a_ntfs, a_idxe, fs_name)) {
+        if (ntfs_dent_copy(a_ntfs, a_idxe, endaddr, fs_name)) {
             if (tsk_verbose)
                 tsk_fprintf(stderr,
                     "ntfs_proc_idxentry: Skipping because error copying dent_entry\n");
@@ -743,11 +754,12 @@ ntfs_fix_idxrec(NTFS_INFO * ntfs, ntfs_idxrec * idxrec, uint32_t len)
 * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated
 * structure or a new structure.
 * @param a_addr Address of directory to process.
+* @param recursion_depth Recursion depth to limit the number of self-calls
 * @returns error, corruption, ok etc.
 */
 TSK_RETVAL_ENUM
 ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     NTFS_INFO *ntfs = (NTFS_INFO *) a_fs;
     TSK_FS_DIR *fs_dir;
@@ -1001,7 +1013,7 @@ ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
         }
     }
     else {
-        int off;
+        unsigned int off;
 
         if (fs_attr_idx->flags & TSK_FS_ATTR_RES) {
             tsk_error_reset();
@@ -1011,11 +1023,22 @@ ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
             return TSK_COR;
         }
 
+        // Taking 128 MiB as an arbitrary upper bound
+        if (fs_attr_idx->nrd.allocsize > (128 * 1024 * 1024)) {
+            tsk_error_reset();
+           tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
+           tsk_error_set_errstr
+               ("fs_attr_idx->nrd.allocsize value out of bounds");
+           return TSK_COR;
+        }
+
         /*
          * Copy the index allocation run into a big buffer
          */
         idxalloc_len = fs_attr_idx->nrd.allocsize;
-        if ((idxalloc = (char *)tsk_malloc((size_t) idxalloc_len)) == NULL) {
+        // default to null unless length is greater than 0
+        idxalloc = NULL;
+        if ((idxalloc_len > 0) && ((idxalloc = (char *)tsk_malloc((size_t)idxalloc_len)) == NULL)) {
             return TSK_ERR;
         }
 
@@ -1067,7 +1090,7 @@ ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
             uint32_t list_len, rec_len;
 
             // Ensure that there is enough data for an idxrec
-            if (sizeof(ntfs_idxrec) > idxalloc_len - off) {
+            if ((idxalloc_len < sizeof(ntfs_idxrec)) || (off > idxalloc_len - sizeof(ntfs_idxrec))) {
                 tsk_error_reset();
                 tsk_error_set_errno(TSK_ERR_FS_INODE_COR);
                 tsk_error_set_errstr
@@ -1277,9 +1300,11 @@ ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
         
         std::vector <NTFS_META_ADDR> &childFiles = ntfs_parent_map_get(ntfs, a_addr, seqToSrch);
 
-        if ((fs_name = tsk_fs_name_alloc(256, 0)) == NULL)
+        if ((fs_name = tsk_fs_name_alloc(256, 0)) == NULL){
+            tsk_release_lock(&ntfs->orphan_map_lock);
             return TSK_ERR;
-
+        }
+        
         fs_name->type = TSK_FS_NAME_TYPE_UNDEF;
         fs_name->par_addr = a_addr;
         fs_name->par_seq = fs_dir->fs_file->meta->seq;
diff --git a/tsk/fs/tsk_apfs.hpp b/tsk/fs/tsk_apfs.hpp
index ab549ce43a7055fd0e9a06a48375363c0b832304..700cd54f07ef2569dda723fcf5c3393b07e36779 100755
--- a/tsk/fs/tsk_apfs.hpp
+++ b/tsk/fs/tsk_apfs.hpp
@@ -131,14 +131,23 @@ class APFSBtreeNodeIterator {
   }
 
   template <typename Void = void>
-  auto init_value()
+  auto init_value(int recursion_depth)
       -> std::enable_if_t<Node::is_variable_kv_node::value, Void> {
+    if ((recursion_depth < 0) || (recursion_depth > 64)) {
+      throw std::runtime_error("init_value exceeds recursion depth");
+    }
     if (this->_node->has_fixed_kv_size()) {
       throw std::runtime_error("btree does not have variable sized keys");
     }
     const auto &t = _node->_table_data.toc.variable[_index];
     const auto key_data = _node->_table_data.koff + t.key_offset;
     const auto val_data = _node->_table_data.voff - t.val_offset;
+    if (key_data > _node->_storage.data() + _node->_storage.size()) {
+      throw std::runtime_error("init_value: invalid key_offset");
+    }
+    if (val_data < _node->_storage.data()) {
+      throw std::runtime_error("init_value: invalid val_offset");
+    }
 
     memory_view key{key_data, t.key_length};
 
@@ -150,18 +159,27 @@ class APFSBtreeNodeIterator {
       const auto block_num = *((apfs_block_num *)val_data);
 
       _child_it = std::make_unique<typename Node::iterator>(
-          own_node(_node.get(), block_num), 0);
+          own_node(_node.get(), block_num), 0, recursion_depth);
     }
   }
 
   template <typename Void = void>
-  auto init_value() -> std::enable_if_t<Node::is_fixed_kv_node::value, Void> {
+  auto init_value(int recursion_depth) -> std::enable_if_t<Node::is_fixed_kv_node::value, Void> {
+    if ((recursion_depth < 0) || (recursion_depth > 64)) {
+      throw std::runtime_error("init_value exceeds recursion depth");
+    }
     if (!this->_node->has_fixed_kv_size()) {
       throw std::runtime_error("btree does not have fixed sized keys");
     }
     const auto &t = _node->_table_data.toc.fixed[_index];
     const auto key_data = _node->_table_data.koff + t.key_offset;
     const auto val_data = _node->_table_data.voff - t.val_offset;
+    if (key_data > _node->_storage.data() + _node->_storage.size()) {
+      throw std::runtime_error("init_value: invalid key_offset");
+    }
+    if (val_data < _node->_storage.data()) {
+      throw std::runtime_error("init_value: invalid val_offset");
+    }
 
     if (_node->is_leaf()) {
       _val = {(typename Node::key_type)key_data,
@@ -170,7 +188,7 @@ class APFSBtreeNodeIterator {
       const auto block_num = *((apfs_block_num *)val_data);
 
       _child_it = std::make_unique<typename Node::iterator>(
-          own_node(_node.get(), block_num), 0);
+          own_node(_node.get(), block_num), 0, recursion_depth);
     }
   }
 
@@ -178,9 +196,9 @@ class APFSBtreeNodeIterator {
   // Forward iterators must be DefaultConstructible
   APFSBtreeNodeIterator() = default;
 
-  APFSBtreeNodeIterator(const Node *node, uint32_t index);
+  APFSBtreeNodeIterator(const Node *node, uint32_t index, int recursion_depth);
 
-  APFSBtreeNodeIterator(lw_shared_ptr<Node> &&node, uint32_t index);
+  APFSBtreeNodeIterator(lw_shared_ptr<Node> &&node, uint32_t index, int recursion_depth);
 
   APFSBtreeNodeIterator(const Node *node, uint32_t index,
                         typename Node::iterator &&child);
@@ -270,7 +288,7 @@ class APFSBtreeNodeIterator {
         auto index{_index};
 
         this->~APFSBtreeNodeIterator();
-        new (this) APFSBtreeNodeIterator(std::move(node), index);
+        new (this) APFSBtreeNodeIterator(std::move(node), index, 0);
       }
       return (*this);
     }
@@ -287,7 +305,7 @@ class APFSBtreeNodeIterator {
     auto index{_index};
 
     this->~APFSBtreeNodeIterator();
-    new (this) APFSBtreeNodeIterator(std::move(node), index);
+    new (this) APFSBtreeNodeIterator(std::move(node), index, 0);
 
     return (*this);
   }
@@ -434,8 +452,17 @@ class APFSBtreeNode : public APFSObject, public APFSOmap::node_tag {
     }
 
     _table_data.toc = {_storage.data() + toffset()};
+    if ((uintptr_t)_table_data.toc.v - (uintptr_t)_storage.data() > _storage.size()) {
+      throw std::runtime_error("APFSBtreeNode: invalid toffset");
+    }
     _table_data.voff = _storage.data() + voffset();
+    if (_table_data.voff > _storage.data() + _storage.size()) {
+      throw std::runtime_error("APFSBtreeNode: invalid voffset");
+    }
     _table_data.koff = _storage.data() + koffset();
+    if (_table_data.koff > _storage.data() + _storage.size()) {
+      throw std::runtime_error("APFSBtreeNode: invalid koffset");
+    }
   }
 
   inline bool is_root() const noexcept {
@@ -484,8 +511,8 @@ class APFSBtreeNode : public APFSObject, public APFSOmap::node_tag {
  public:
   using iterator = APFSBtreeNodeIterator<APFSBtreeNode>;
 
-  iterator begin() const { return {this, 0}; }
-  iterator end() const { return {this, key_count()}; }
+  iterator begin() const { return {this, 0, 0}; }
+  iterator end() const { return {this, key_count(), 0}; }
 
   template <typename T, typename Compare>
   iterator find(const T &value, Compare comp) const {
@@ -505,7 +532,7 @@ class APFSBtreeNode : public APFSObject, public APFSOmap::node_tag {
 
         if (res == 0) {
           // We've found it!
-          return {this, i - 1};
+          return {this, i - 1, 0};
         }
 
         if (res < 0) {
@@ -526,7 +553,7 @@ class APFSBtreeNode : public APFSObject, public APFSOmap::node_tag {
       const auto &k = key(i - 1);
 
       if (comp(k, value) <= 0) {
-        iterator it{this, i - 1};
+        iterator it{this, i - 1, 0};
 
         auto ret = it._child_it->_node->find(value, comp);
         if (ret == it._child_it->_node->end()) {
@@ -580,8 +607,8 @@ class APFSJObjBtreeNode : public APFSBtreeNode<> {
 
   inline bool is_leaf() const noexcept { return (bn()->level == 0); }
 
-  inline iterator begin() const { return {this, 0}; }
-  inline iterator end() const { return {this, key_count()}; }
+  inline iterator begin() const { return {this, 0, 0}; }
+  inline iterator end() const { return {this, key_count(), 0}; }
 
   template <typename T, typename Compare>
   inline iterator find(const T &value, Compare comp) const {
@@ -595,7 +622,7 @@ class APFSJObjBtreeNode : public APFSBtreeNode<> {
 
         if (res == 0) {
           // We've found it!
-          return {this, i};
+          return {this, i, 0};
         }
 
         if (res > 0) {
@@ -627,7 +654,7 @@ class APFSJObjBtreeNode : public APFSBtreeNode<> {
       if (v == 0) {
         // We need to see if the jobj might be in the last node
         if (last != 0) {
-          iterator it{this, last - 1};
+          iterator it{this, last - 1, 0};
 
           auto ret = it._child_it->_node->find(value, comp);
           if (ret != it._child_it->_node->end()) {
@@ -644,7 +671,7 @@ class APFSJObjBtreeNode : public APFSBtreeNode<> {
       return end();
     }
 
-    iterator it{this, last};
+    iterator it{this, last, 0};
 
     auto ret = it._child_it->_node->find(value, comp);
     if (ret == it._child_it->_node->end()) {
@@ -828,7 +855,7 @@ class APFSKeybag : public APFSObject {
   }
 
   using key = struct {
-    Guid uuid;
+    TSKGuid uuid;
     std::unique_ptr<uint8_t[]> data;
     uint16_t type;
   };
@@ -837,7 +864,7 @@ class APFSKeybag : public APFSObject {
   APFSKeybag(const APFSPool &pool, const apfs_block_num block_num,
              const uint8_t *key, const uint8_t *key2 = nullptr);
 
-  std::unique_ptr<uint8_t[]> get_key(const Guid &uuid, uint16_t type) const;
+  std::unique_ptr<uint8_t[]> get_key(const TSKGuid &uuid, uint16_t type) const;
 
   std::vector<key> get_keys() const;
 };
@@ -876,7 +903,7 @@ class APFSSuperblock : public APFSObject {
     return spaceman().num_free_blocks();
   }
 
-  inline Guid uuid() const { return {sb()->uuid}; }
+  inline TSKGuid uuid() const { return {sb()->uuid}; }
 
   const std::vector<apfs_block_num> volume_blocks() const;
   const std::vector<apfs_block_num> sm_bitmap_blocks() const;
@@ -961,12 +988,12 @@ class APFSFileSystem : public APFSObject {
   };
 
   struct wrapped_kek {
-    Guid uuid;
+    TSKGuid uuid;
     uint8_t data[0x28];
     uint64_t iterations;
     uint64_t flags;
     uint8_t salt[0x10];
-    wrapped_kek(Guid &&uuid, const std::unique_ptr<uint8_t[]> &);
+    wrapped_kek(TSKGuid &&uuid, const std::unique_ptr<uint8_t[]> &);
 
     inline bool hw_crypt() const noexcept {
       // If this bit is set, some sort of hardware encryption is used.
@@ -1032,7 +1059,7 @@ class APFSFileSystem : public APFSObject {
 
   bool unlock(const std::string &password) noexcept;
 
-  inline Guid uuid() const noexcept { return {fs()->uuid}; }
+  inline TSKGuid uuid() const noexcept { return {fs()->uuid}; }
 
   inline std::string name() const { return {fs()->name}; }
 
@@ -1135,10 +1162,16 @@ APFSBtreeNodeIterator<APFSJObjBtreeNode>::own_node(
 
 template <>
 template <>
-inline void APFSBtreeNodeIterator<APFSJObjBtreeNode>::init_value<void>() {
+inline void APFSBtreeNodeIterator<APFSJObjBtreeNode>::init_value<void>(int recursion_depth) {
   const auto &t = _node->_table_data.toc.variable[_index];
   const auto key_data = _node->_table_data.koff + t.key_offset;
   const auto val_data = _node->_table_data.voff - t.val_offset;
+  if (key_data > _node->_storage.data() + _node->_storage.size()) {
+    throw std::runtime_error("APFSBtreeNodeIterator<APFSJObjBtreeNode>::init_value: invalid key_offset");
+  }
+  if (val_data < _node->_storage.data()) {
+    throw std::runtime_error("APFSBtreeNodeIterator<APFSJObjBtreeNode>::init_value: invalid val_offset");
+  }
 
   memory_view key{key_data, t.key_length};
 
@@ -1156,32 +1189,32 @@ inline void APFSBtreeNodeIterator<APFSJObjBtreeNode>::init_value<void>() {
     }
 
     _child_it = std::make_unique<typename APFSJObjBtreeNode::iterator>(
-        own_node(_node.get(), it->value->paddr), 0);
+        own_node(_node.get(), it->value->paddr), 0, recursion_depth);
   }
 }
 
 template <typename Node>
 APFSBtreeNodeIterator<Node>::APFSBtreeNodeIterator(const Node *node,
-                                                   uint32_t index)
+                                                   uint32_t index, int recursion_depth)
     : _node{own_node(node)}, _index{index} {
   // If we're the end, then there's nothing to do
   if (index >= _node->key_count()) {
     return;
   }
 
-  init_value();
+  init_value(recursion_depth + 1);
 }
 
 template <typename Node>
 APFSBtreeNodeIterator<Node>::APFSBtreeNodeIterator(lw_shared_ptr<Node> &&node,
-                                                   uint32_t index)
+                                                   uint32_t index, int recursion_depth)
     : _node{std::forward<lw_shared_ptr<Node>>(node)}, _index{index} {
   // If we're the end, then there's nothing to do
   if (index >= _node->key_count()) {
     return;
   }
 
-  init_value();
+  init_value(recursion_depth + 1);
 }
 
 template <typename Node>
diff --git a/tsk/fs/tsk_exfatfs.h b/tsk/fs/tsk_exfatfs.h
index 2f88ab145c2a9b47a7f0c431c4c8888f98859b55..475672fe1f6c482ec27dfc1d5f43be5a0261bcd4 100755
--- a/tsk/fs/tsk_exfatfs.h
+++ b/tsk/fs/tsk_exfatfs.h
@@ -401,7 +401,7 @@ extern "C" {
 
     extern TSK_RETVAL_ENUM
     exfatfs_dent_parse_buf(FATFS_INFO *a_fatfs, TSK_FS_DIR *a_fs_dir, char *a_buf,
-        TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs);
+        TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs, int recursion_depth);
 
 #ifdef __cplusplus
 }
diff --git a/tsk/fs/tsk_ext2fs.h b/tsk/fs/tsk_ext2fs.h
index 71640f5fd67ffbb68815568886a7bd8263e17adc..348b6b9ae2070db67a8a78b06d24f3ae02bd8ea0 100644
--- a/tsk/fs/tsk_ext2fs.h
+++ b/tsk/fs/tsk_ext2fs.h
@@ -679,7 +679,7 @@ extern "C" {
 
     extern TSK_RETVAL_ENUM
         ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-        TSK_INUM_T a_addr);
+        TSK_INUM_T a_addr, int recursion_depth);
     extern uint8_t ext2fs_jentry_walk(TSK_FS_INFO *, int,
         TSK_FS_JENTRY_WALK_CB, void *);
     extern uint8_t ext2fs_jblk_walk(TSK_FS_INFO *, TSK_DADDR_T,
diff --git a/tsk/fs/tsk_fatfs.h b/tsk/fs/tsk_fatfs.h
index ba60294c282bdd8ee67ccf72f5545d4af756a07b..1e3133728618852d025fbe9fb02149f0d011f848 100644
--- a/tsk/fs/tsk_fatfs.h
+++ b/tsk/fs/tsk_fatfs.h
@@ -266,7 +266,7 @@ extern "C" {
 
         TSK_RETVAL_ENUM (*dent_parse_buf)(FATFS_INFO *a_fatfs, 
             TSK_FS_DIR *a_fs_dir, char *a_buf, TSK_OFF_T a_buf_len, 
-            TSK_DADDR_T *a_sector_addrs);
+            TSK_DADDR_T *a_sector_addrs, int recursion_depth);
 
         TSK_RETVAL_ENUM (*dinode_copy)(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, 
             FATFS_DENTRY *a_dentry, uint8_t a_cluster_is_alloc, TSK_FS_FILE *a_fs_file);
@@ -347,7 +347,7 @@ extern "C" {
 
     extern TSK_RETVAL_ENUM
         fatfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
-        TSK_INUM_T a_addr);
+        TSK_INUM_T a_addr, int recursion_depth);
 
     extern int fatfs_name_cmp(TSK_FS_INFO *, const char *, const char *);
 
diff --git a/tsk/fs/tsk_fatxxfs.h b/tsk/fs/tsk_fatxxfs.h
index e315a244811f02ebd9f2c921e6df97d87883eecc..919458e74d095357102c5f250784810b97a3ca4f 100755
--- a/tsk/fs/tsk_fatxxfs.h
+++ b/tsk/fs/tsk_fatxxfs.h
@@ -195,7 +195,7 @@ extern "C" {
 
     extern TSK_RETVAL_ENUM
     fatxxfs_dent_parse_buf(FATFS_INFO * fatfs, TSK_FS_DIR * a_fs_dir, char *buf,
-        TSK_OFF_T len, TSK_DADDR_T * addrs);
+        TSK_OFF_T len, TSK_DADDR_T * addrs, int recursion_depth);
 
 #ifdef __cplusplus
 }
diff --git a/tsk/fs/tsk_ffs.h b/tsk/fs/tsk_ffs.h
index 08f55341506142248d1700b0bb5b809d56795212..0255b5c91e461c2772f949798c7251092b192164 100644
--- a/tsk/fs/tsk_ffs.h
+++ b/tsk/fs/tsk_ffs.h
@@ -498,7 +498,7 @@ extern "C" {
     } FFS_INFO;
 
     extern TSK_RETVAL_ENUM ffs_dir_open_meta(TSK_FS_INFO * a_fs,
-        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr);
+        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr, int recursion_depth);
 
 #ifdef __cplusplus
 }
diff --git a/tsk/fs/tsk_fs.h b/tsk/fs/tsk_fs.h
index d0b5d1dc59697222a59fc889a8e3de8248a17e45..3cfdd914e0f0c3ea8c6b4b3464bc966fc44e13c1 100644
--- a/tsk/fs/tsk_fs.h
+++ b/tsk/fs/tsk_fs.h
@@ -821,6 +821,7 @@ extern "C" {
         TSK_FS_TYPE_HFS_LEGACY= 0x00008000,   ///< HFS file system
         TSK_FS_TYPE_APFS = 0x00010000, ///< APFS file system
         TSK_FS_TYPE_APFS_DETECT = 0x00010000, ///< APFS auto detection
+		TSK_FS_TYPE_LOGICAL = 0x00020000, ///< Logical directory (auto detection not supported)
         TSK_FS_TYPE_UNSUPP = 0xffffffff,        ///< Unsupported file system
     };
     /* NOTE: Update bindings/java/src/org/sleuthkit/datamodel/TskData.java
@@ -904,6 +905,13 @@ extern "C" {
 #define TSK_FS_TYPE_ISAPFS(ftype) \
     (((ftype) & TSK_FS_TYPE_APFS_DETECT)?1:0)
 
+	/**
+	* \ingroup fslib
+	* Macro that takes a file system type and returns 1 if the type
+	* is for a logical directory "file system". */
+#define TSK_FS_TYPE_ISDIR(ftype) \
+	(((ftype) & TSK_FS_TYPE_LOGICAL)?1:0)
+
 
     /**
     * Flags for the FS_INFO structure
@@ -1016,7 +1024,7 @@ extern "C" {
          uint8_t(*istat) (TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM flags, FILE * hFile, TSK_INUM_T inum,
             TSK_DADDR_T numblock, int32_t sec_skew);
 
-         TSK_RETVAL_ENUM(*dir_open_meta) (TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T inode);  ///< \internal Call tsk_fs_dir_open_meta() instead.
+         TSK_RETVAL_ENUM(*dir_open_meta) (TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T inode, int recursion_depth);  ///< \internal Call tsk_fs_dir_open_meta() instead.
 
          uint8_t(*jopen) (TSK_FS_INFO *, TSK_INUM_T);   ///< \internal
 
diff --git a/tsk/fs/tsk_fs_i.h b/tsk/fs/tsk_fs_i.h
index 0340e82acf2e602e8b2929acab4ac8cd5ceada27..3ad572579bf073767611128f148dbcaabc339c75 100644
--- a/tsk/fs/tsk_fs_i.h
+++ b/tsk/fs/tsk_fs_i.h
@@ -69,6 +69,11 @@ extern "C" {
 #define setbit(a,i)     (((uint8_t *)(a))[(i)/NBBY] |= (1<<((i)%NBBY)))
 #endif                          /*  */
 
+/* Threshold to prevent the processing of very large directories.
+ * This is the maximum number of entries in a single directory that will be
+ * processed before bailing out */
+#define MAX_DIR_SIZE_TO_PROCESS 1000000
+
 /* Data structure and action to internally load a file */
     typedef struct {
         char *base;
@@ -141,6 +146,9 @@ extern "C" {
     extern void tsk_fs_dir_reset(TSK_FS_DIR * a_fs_dir);
     extern uint8_t tsk_fs_dir_contains(TSK_FS_DIR * a_fs_dir, TSK_INUM_T meta_addr, uint32_t hash);
     extern uint32_t tsk_fs_dir_hash(const char *str);
+    extern uint8_t tsk_fs_dir_walk_internal(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr,
+        TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action,
+        void *a_ptr, int macro_recursion_depth);
 
     /* Orphan Directory Support */
     TSK_RETVAL_ENUM tsk_fs_dir_load_inum_named(TSK_FS_INFO * a_fs);
@@ -198,6 +206,7 @@ extern "C" {
         TSK_FS_TYPE_ENUM, uint8_t);
     extern TSK_FS_INFO *yaffs2_open(TSK_IMG_INFO *, TSK_OFF_T,
         TSK_FS_TYPE_ENUM, uint8_t);
+	extern TSK_FS_INFO *logical_fs_open(TSK_IMG_INFO *);
 
     /* Specific pool file system routines */
     extern TSK_FS_INFO *apfs_open_auto_detect(TSK_IMG_INFO*, TSK_OFF_T,
@@ -228,7 +237,7 @@ extern "C" {
     extern uint8_t tsk_fs_nofs_istat(TSK_FS_INFO * a_fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile,
         TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew);
     extern TSK_RETVAL_ENUM tsk_fs_nofs_dir_open_meta(TSK_FS_INFO * a_fs,
-        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr);
+        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr, int recursion_depth);
     extern uint8_t tsk_fs_nofs_jopen(TSK_FS_INFO * a_fs, TSK_INUM_T inum);
     extern uint8_t tsk_fs_nofs_jentry_walk(TSK_FS_INFO * a_fs,
         int a_flags, TSK_FS_JENTRY_WALK_CB a_action, void *a_ptr);
diff --git a/tsk/fs/tsk_hfs.h b/tsk/fs/tsk_hfs.h
index 2530e0cfe324680852dd25d011cc5bbb49eb03c5..93323518ac1ac148ffab1297902943bd538c06bd 100644
--- a/tsk/fs/tsk_hfs.h
+++ b/tsk/fs/tsk_hfs.h
@@ -740,7 +740,7 @@ extern uint16_t hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen,
 
 
 extern TSK_RETVAL_ENUM hfs_dir_open_meta(TSK_FS_INFO *, TSK_FS_DIR **,
-    TSK_INUM_T);
+    TSK_INUM_T, int);
 extern int hfs_name_cmp(TSK_FS_INFO *, const char *, const char *);
 
 extern uint8_t hfs_jopen(TSK_FS_INFO *, TSK_INUM_T);
diff --git a/tsk/fs/tsk_iso9660.h b/tsk/fs/tsk_iso9660.h
index c822d1847d454214bdd00a73fefe4a0b51438153..7deaa97272dba803d9b774ae9ef3eaae6bbd99dd 100644
--- a/tsk/fs/tsk_iso9660.h
+++ b/tsk/fs/tsk_iso9660.h
@@ -392,7 +392,7 @@ typedef struct {
 } ISO_INFO;
 
 extern TSK_RETVAL_ENUM iso9660_dir_open_meta(TSK_FS_INFO * a_fs,
-    TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr);
+    TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr, int recursion_depth);
 
 extern uint8_t iso9660_dinode_load(ISO_INFO * iso, TSK_INUM_T inum,
     iso9660_inode * dinode);
diff --git a/tsk/fs/tsk_logical_fs.h b/tsk/fs/tsk_logical_fs.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2cfd0e6d8ae93d3401fe2b15c3ef7b552583ebe
--- /dev/null
+++ b/tsk/fs/tsk_logical_fs.h
@@ -0,0 +1,62 @@
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+/*
+ * Contains the structures and function APIs for logical file system support.
+ */
+
+#ifndef _TSK_LOGICALFS_H
+#define _TSK_LOGICALFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LOGICAL_ROOT_INUM 0x10000
+#define LOGICAL_INUM_DIR_INC 0x10000
+#define LOGICAL_BLOCK_SIZE TSK_IMG_INFO_CACHE_LEN
+#define LOGICAL_MAX_PATH_UNICODE 32767
+
+/*
+* Structure of a logical file system handle.
+*/
+typedef struct {
+	TSK_FS_INFO fs_info;    /* super class */
+	TSK_TCHAR * base_path;  // Base path - pointer to data in IMG_LOGICAL_INFO
+} LOGICALFS_INFO;
+
+typedef enum  {
+	LOGICALFS_NO_SEARCH = 0,         ///< Traverse entire file system
+	LOGICALFS_SEARCH_BY_PATH = 1,    ///< Search file system for given path
+	LOGICALFS_SEARCH_BY_INUM = 2     ///< Search file system for given inum
+} LOGICALFS_SEARCH_TYPE;
+
+typedef struct {
+	LOGICALFS_SEARCH_TYPE search_type;
+	TSK_TCHAR* target_path;
+	TSK_INUM_T target_inum;
+	int target_found;
+	TSK_TCHAR* found_path;
+	TSK_INUM_T found_inum;
+} LOGICALFS_SEARCH_HELPER;
+
+enum LOGICALFS_DIR_LOADING_MODE {
+	LOGICALFS_LOAD_ALL = 0,
+	LOGICALFS_LOAD_DIRS_ONLY = 1,
+	LOGICALFS_LOAD_FILES_ONLY = 2
+};
+
+extern ssize_t logicalfs_read_block(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_DADDR_T a_offset, char *buf);
+extern ssize_t logicalfs_read(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_DADDR_T a_offset, size_t len, char *buf);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tsk/fs/tsk_ntfs.h b/tsk/fs/tsk_ntfs.h
index 6364dc8bf219ed99612d5d17f0db098bc0424e74..c16696a75c5209341872913297f7910e035ace3b 100644
--- a/tsk/fs/tsk_ntfs.h
+++ b/tsk/fs/tsk_ntfs.h
@@ -750,7 +750,7 @@ extern "C" {
     extern TSK_RETVAL_ENUM ntfs_dinode_lookup(NTFS_INFO *, char *,
         TSK_INUM_T);
     extern TSK_RETVAL_ENUM ntfs_dir_open_meta(TSK_FS_INFO * a_fs,
-        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr);
+        TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr, int recursion_depth);
 
     extern void ntfs_orphan_map_free(NTFS_INFO * a_ntfs);
 
diff --git a/tsk/fs/yaffs.cpp b/tsk/fs/yaffs.cpp
index 620f38e70410303c6a9d840b7e556e70322613fa..b8798126ae76bde351e734461cf486c037cecfc0 100755
--- a/tsk/fs/yaffs.cpp
+++ b/tsk/fs/yaffs.cpp
@@ -2686,7 +2686,7 @@ static TSK_RETVAL_ENUM
 
 static TSK_RETVAL_ENUM
     yaffsfs_dir_open_meta(TSK_FS_INFO *a_fs, TSK_FS_DIR ** a_fs_dir,
-    TSK_INUM_T a_addr)
+    TSK_INUM_T a_addr, int recursion_depth)
 {
     TSK_FS_DIR *fs_dir;
     TSK_FS_NAME *fs_name;
diff --git a/tsk/hashdb/encase.c b/tsk/hashdb/encase.c
index 7e9575a41323c663742c990ab31c6370313b8ca4..bfccf2af2fcbc709496352328f095943b365c7e6 100755
--- a/tsk/hashdb/encase.c
+++ b/tsk/hashdb/encase.c
@@ -129,7 +129,7 @@ uint8_t
 
     /* Status */
     if (tsk_verbose)
-        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"),
+        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%" PRIttocTSK ")\n"),
         hdb_binsrch_info->base.db_fname);
 
     memset(phash, '0', sizeof(phash));
diff --git a/tsk/hashdb/hashkeeper.c b/tsk/hashdb/hashkeeper.c
index 3c7a3b012eadc9349d1899accda547ff38f3fd75..8be4c25198f61b60b5ce3571ded66a5f44b4d62d 100755
--- a/tsk/hashdb/hashkeeper.c
+++ b/tsk/hashdb/hashkeeper.c
@@ -280,7 +280,7 @@ uint8_t
 
     /* Status */
     if (tsk_verbose)
-        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"),
+        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%" PRIttocTSK ")\n"),
         hdb_binsrch_info->base.db_fname);
 
     /* Allocate a buffer to hold the previous hash values */
diff --git a/tsk/hashdb/md5sum.c b/tsk/hashdb/md5sum.c
index 638008dfd5b5355bc479cd87888d51d10c4e401c..d4a3a3239ff39daa67934abebd144c02473d7542 100755
--- a/tsk/hashdb/md5sum.c
+++ b/tsk/hashdb/md5sum.c
@@ -218,7 +218,7 @@ uint8_t
 
     /* Status */
     if (tsk_verbose)
-        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"),
+        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%" PRIttocTSK ")\n"),
         hdb_info->base.db_fname);
 
     /* Allocate a buffer for the previous hash value */
diff --git a/tsk/hashdb/nsrl.c b/tsk/hashdb/nsrl.c
index 7963e59002142b0725973d6b4f212554e600c87b..119ba979b33367cb0d33c56fd35cae0a0d3e7f56 100755
--- a/tsk/hashdb/nsrl.c
+++ b/tsk/hashdb/nsrl.c
@@ -401,7 +401,7 @@ uint8_t
 
     /* Status */
     if (tsk_verbose)
-        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"),
+        TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%" PRIttocTSK ")\n"),
         hdb_info_base->db_fname);
 
     /* Allocate a buffer for the previous hash value */
diff --git a/tsk/img/Makefile.am b/tsk/img/Makefile.am
index 9997d28750ee89e8a3fed4edb332f49751ad1b92..82eeee5dd61c4da194081d0ae79653b982b2ed48 100644
--- a/tsk/img/Makefile.am
+++ b/tsk/img/Makefile.am
@@ -2,7 +2,7 @@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../..
 EXTRA_DIST = .indent.pro 
 
 noinst_LTLIBRARIES = libtskimg.la
-libtskimg_la_SOURCES = img_open.cpp img_types.c raw.c raw.h \
+libtskimg_la_SOURCES = img_open.cpp img_types.c raw.c raw.h logical_img.c logical_img.h \
     aff.c aff.h ewf.cpp ewf.h tsk_img_i.h img_io.c mult_files.c \
     vhd.c vhd.h vmdk.c vmdk.h img_writer.cpp img_writer.h unsupported_types.c unsupported_types.h
 
diff --git a/tsk/img/img_io.c b/tsk/img/img_io.c
index a8c54b3c0b8dbf407d948598b0ade8fe73e532c3..dd84b24c6857f8f25121459239977d883de309a5 100755
--- a/tsk/img/img_io.c
+++ b/tsk/img/img_io.c
@@ -21,7 +21,7 @@ static ssize_t tsk_img_read_no_cache(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_off,
 
     /* Some of the lower-level methods like block-sized reads.
         * So if the len is not that multiple, then make it. */
-    if (a_len % a_img_info->sector_size) {
+    if ((a_img_info->sector_size > 0) && (a_len % a_img_info->sector_size)) {
         char *buf2 = a_buf;
 
         size_t len_tmp;
@@ -91,6 +91,7 @@ tsk_img_read(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_off,
     // maps to an int64 we prefer it over size_t although likely checking
     // for ( a_len > SSIZE_MAX ) is better but the code does not seem to
     // use that approach.
+
     if ((TSK_OFF_T) a_len < 0) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_IMG_ARG);
@@ -212,6 +213,7 @@ tsk_img_read(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_off,
         // since read_count is used in the calculation it may not be negative.
         // Also it does not make sense to copy data when the read_count is 0.
         if (read_count > 0) {
+
             TSK_OFF_T rel_off = 0;
             a_img_info->cache_age[cache_next] = CACHE_AGE;
             a_img_info->cache_len[cache_next] = read_count;
diff --git a/tsk/img/img_open.cpp b/tsk/img/img_open.cpp
index 6f879e3cadcdd6b121c3d8f3c07f254a2200bc4e..2b72d2da51967a0eda47d87e6d59403a80c82543 100644
--- a/tsk/img/img_open.cpp
+++ b/tsk/img/img_open.cpp
@@ -17,6 +17,7 @@
 #include "tsk_img_i.h"
 
 #include "raw.h"
+#include "logical_img.h"
 
 #if HAVE_LIBAFFLIB
 #include "aff.h"
@@ -109,7 +110,7 @@ tsk_img_open(int num_img,
 
     if (tsk_verbose)
         TFPRINTF(stderr,
-            _TSK_T("tsk_img_open: Type: %d   NumImg: %d  Img1: %s\n"),
+            _TSK_T("tsk_img_open: Type: %d   NumImg: %d  Img1: %" PRIttocTSK "\n"),
             type, num_img, images[0]);
 
 
@@ -230,22 +231,14 @@ tsk_img_open(int num_img,
         return NULL;
     }
 
-#if HAVE_LIBVHDI
-    case TSK_IMG_TYPE_VHD_VHD:
-        img_info = vhdi_open(num_img, images, a_ssize);
-        break;
-#endif
-
-#if HAVE_LIBVMDK
-    case TSK_IMG_TYPE_VMDK_VMDK:
-        img_info = vmdk_open(num_img, images, a_ssize);
-        break;
-#endif
-
     case TSK_IMG_TYPE_RAW:
         img_info = raw_open(num_img, images, a_ssize);
         break;
 
+	case TSK_IMG_TYPE_LOGICAL:
+		img_info = logical_open(num_img, images, a_ssize);
+		break;
+
 #if HAVE_LIBAFFLIB
     case TSK_IMG_TYPE_AFF_AFF:
     case TSK_IMG_TYPE_AFF_AFD:
@@ -261,6 +254,18 @@ tsk_img_open(int num_img,
         break;
 #endif
 
+#if HAVE_LIBVMDK
+    case TSK_IMG_TYPE_VMDK_VMDK:
+        img_info = vmdk_open(num_img, images, a_ssize);
+        break;
+#endif
+
+#if HAVE_LIBVHDI
+    case TSK_IMG_TYPE_VHD_VHD:
+        img_info = vhdi_open(num_img, images, a_ssize);
+        break;
+#endif
+
     default:
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_IMG_UNSUPTYPE);
diff --git a/tsk/img/img_types.c b/tsk/img/img_types.c
index 5a36a76cd160507df2f8c9c21c5ca24f831c140b..0e920812efae6e06547e53e0bf6db45497aebdf1 100644
--- a/tsk/img/img_types.c
+++ b/tsk/img/img_types.c
@@ -45,6 +45,9 @@ static IMG_TYPES img_open_table[] = {
 #endif
 #if HAVE_LIBVHDI
     {"vhd", TSK_IMG_TYPE_VHD_VHD, "Virtual Hard Drive (Microsoft)"},
+#endif
+#ifdef TSK_WIN32
+	{"logical", TSK_IMG_TYPE_LOGICAL, "Logical Directory"},
 #endif
     {0,0,""},
 };
diff --git a/tsk/img/logical_img.c b/tsk/img/logical_img.c
new file mode 100644
index 0000000000000000000000000000000000000000..c651286e2afa03c3663c86299e3570b6bb0215fd
--- /dev/null
+++ b/tsk/img/logical_img.c
@@ -0,0 +1,186 @@
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+/**
+ * Internal code to open and read logical directories
+ */
+
+#include "tsk_img_i.h"
+#include "logical_img.h"
+#include "tsk/util/file_system_utils.h"
+
+/** 
+ * \internal
+ * Display information about the disk image set.
+ *
+ * @param img_info Disk image to analyze
+ * @param hFile Handle to print information to
+ */
+static void
+logical_imgstat(TSK_IMG_INFO * img_info, FILE * hFile)
+{
+	IMG_LOGICAL_INFO *dir_info = (IMG_LOGICAL_INFO *) img_info;
+
+    tsk_fprintf(hFile, "IMAGE FILE INFORMATION\n");
+    tsk_fprintf(hFile, "--------------------------------------------\n");
+    tsk_fprintf(hFile, "Image Type: logical directory\n");
+	tsk_fprintf(hFile,
+		"Base Directory Path: %" PRIttocTSK "\n",
+		dir_info->base_path);
+
+    return;
+}
+
+/*
+ * Clear a cache entry. Assumes we acquired the cache_lock already or are in the process
+ * of closing the image and don't need it.
+ */
+void
+clear_inum_cache_entry(IMG_LOGICAL_INFO *logical_img_info, int index) {
+	logical_img_info->inum_cache[index].inum = LOGICAL_INVALID_INUM;
+	if (logical_img_info->inum_cache[index].path != NULL) {
+		free(logical_img_info->inum_cache[index].path);
+		logical_img_info->inum_cache[index].path = NULL;
+	}
+	logical_img_info->inum_cache[index].cache_age = 0;
+}
+
+/** 
+ * \internal
+ * Close a logical directory image: frees the base path, closes any cached
+ * file handles, clears the inum cache, and frees the image structure.
+ * @param img_info logical directory to close
+ */
+static void
+logical_close(TSK_IMG_INFO * img_info)
+{
+	IMG_LOGICAL_INFO *logical_img_info = (IMG_LOGICAL_INFO *)img_info;
+	free(logical_img_info->base_path);
+	for (int i = 0; i < LOGICAL_FILE_HANDLE_CACHE_LEN; i++) {
+#ifdef TSK_WIN32
+		if (logical_img_info->file_handle_cache[i].fd != 0) {
+			CloseHandle(logical_img_info->file_handle_cache[i].fd);
+		}
+#endif
+	}
+	for (int i = 0; i < LOGICAL_INUM_CACHE_LEN; i++) {
+		clear_inum_cache_entry(logical_img_info, i);
+	}
+	tsk_img_free(img_info);
+}
+
+static ssize_t
+logical_read(TSK_IMG_INFO * img_info, TSK_OFF_T offset, char *buf, size_t len)
+{
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_IMG_READ);
+	tsk_error_set_errstr("logical_read: Logical image read is not supported");
+	return 0;
+}
+
+/** 
+ * \internal
+ * Open a directory as a logical "image". Only a single directory path is
+ * supported, and only on Windows (non-Windows systems return an error).
+ * @param a_num_img Number of images in set
+ * @param a_images List of disk image paths (in sorted order)
+ * @param a_ssize Size of device sector in bytes (or 0 for default)
+ *
+ * @return NULL on error
+ */
+TSK_IMG_INFO *
+logical_open(int a_num_img, const TSK_TCHAR * const a_images[],
+	unsigned int a_ssize)
+{
+	IMG_LOGICAL_INFO *logical_info;
+	TSK_IMG_INFO *img_info;
+
+	if (LOGICAL_IMG_DEBUG_PRINT) fprintf(stderr, "logical_open - Opening image\n");
+	fflush(stderr);
+
+#ifndef TSK_WIN32
+	tsk_error_reset();
+	tsk_error_set_errno(TSK_ERR_IMG_ARG);
+	tsk_error_set_errstr("logical_open: Logical directories not supported for non-Windows systems");
+	return NULL;
+#endif
+
+	if (a_num_img != 1) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_ARG);
+		tsk_error_set_errstr("logical_open: Only one directory (image name) is supported for logical directories");
+		return NULL;
+	}
+
+	if ((logical_info =
+		(IMG_LOGICAL_INFO *)tsk_img_malloc(sizeof(IMG_LOGICAL_INFO))) == NULL)
+		return NULL;
+	img_info = (TSK_IMG_INFO *)logical_info;
+
+	logical_info->is_winobj = 0;
+#ifdef TSK_WIN32
+	logical_info->is_winobj = is_windows_device_path(a_images[0]);
+#endif
+
+	// Check that the given path exists and is a directory (return value = -3)
+	TSK_OFF_T size_result = get_size_of_file_on_disk(a_images[0], logical_info->is_winobj);
+	if (size_result != -3) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_ARG);
+		tsk_error_set_errstr("logical_open: Image path is not a directory");
+		tsk_img_free(img_info);
+		return NULL;
+	}
+
+	img_info->size = INT64_MAX;
+	img_info->itype = TSK_IMG_TYPE_LOGICAL;
+
+	// Initialize file handle cache
+	for (int i = 0; i < LOGICAL_FILE_HANDLE_CACHE_LEN; i++) {
+		logical_info->file_handle_cache[i].fd = 0;
+		logical_info->file_handle_cache[i].inum = LOGICAL_INVALID_INUM;
+	}
+	logical_info->next_file_handle_cache_slot = 0;
+
+	// Initialize the inum cache
+	for (int i = 0; i < LOGICAL_INUM_CACHE_LEN; i++) {
+		logical_info->inum_cache[i].inum = LOGICAL_INVALID_INUM;
+		logical_info->inum_cache[i].path = NULL;
+		logical_info->inum_cache[i].cache_age = 0;
+	}
+
+	img_info->read = logical_read;
+	img_info->close = logical_close;
+	img_info->imgstat = logical_imgstat;
+
+	size_t len = TSTRLEN(a_images[0]);
+	logical_info->base_path =
+		(TSK_TCHAR *)tsk_malloc(sizeof(TSK_TCHAR) * (len + 1));
+	if (logical_info->base_path == NULL) {
+		tsk_img_free(img_info);
+		return NULL;
+	}
+	TSTRNCPY(logical_info->base_path, a_images[0], len + 1);
+	// Remove trailing slash
+#ifdef TSK_WIN32
+	if ((logical_info->base_path[TSTRLEN(logical_info->base_path) - 1] == L'/') 
+			|| (logical_info->base_path[TSTRLEN(logical_info->base_path) - 1] == L'\\')) {
+		logical_info->base_path[TSTRLEN(logical_info->base_path) - 1] = '\0';
+	}
+#else
+	if (logical_info->base_path[TSTRLEN(logical_info->base_path) - 1] == '/') {
+		logical_info->base_path[TSTRLEN(logical_info->base_path) - 1] = '\0';
+	}
+#endif
+
+	if (LOGICAL_IMG_DEBUG_PRINT) fprintf(stderr, "logical_open - Image opened successfully\n");
+	fflush(stderr);
+    return img_info;
+}
diff --git a/tsk/img/logical_img.h b/tsk/img/logical_img.h
new file mode 100644
index 0000000000000000000000000000000000000000..1be1109afa643053c35117a095b0713b3c9b5709
--- /dev/null
+++ b/tsk/img/logical_img.h
@@ -0,0 +1,74 @@
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+/* 
+ * Contains the logical directory image-specific functions and structures.
+ */
+
+#ifndef _LOGICAL_H
+#define _LOGICAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LOGICAL_IMG_DEBUG_PRINT 0
+#define LOGICAL_IMG_CACHE_AGE   1000
+#define LOGICAL_FILE_HANDLE_CACHE_LEN 10
+#define LOGICAL_INUM_CACHE_LEN 1000
+#define LOGICAL_INUM_CACHE_MAX_AGE 10000
+#define LOGICAL_INUM_CACHE_MAX_PATH_LEN 500
+#define LOGICAL_INVALID_INUM 0
+
+	typedef struct {
+#ifdef TSK_WIN32
+		HANDLE fd;
+#else
+		int fd;
+#endif
+		TSK_INUM_T inum;
+		TSK_OFF_T seek_pos;
+	} LOGICAL_FILE_HANDLE_CACHE;
+
+	typedef struct {
+		TSK_INUM_T inum;
+		TSK_TCHAR *path;
+		int cache_age;
+	} LOGICAL_INUM_CACHE;
+
+    typedef struct {
+		TSK_IMG_INFO img_info;
+		TSK_TCHAR * base_path;
+		uint8_t is_winobj;
+
+		// Goes with the cache handling in tsk_img.h.
+		// To cache blocks, we need to keep track of both the file inum and the offset,
+		// so we need one additional array to track logical file data.
+		TSK_INUM_T cache_inum[TSK_IMG_INFO_CACHE_NUM];    ///< starting byte offset of corresponding cache entry (r/w shared - lock) 
+
+		// Cache a number of open file handles (protected by cache_lock)
+		LOGICAL_FILE_HANDLE_CACHE file_handle_cache[LOGICAL_FILE_HANDLE_CACHE_LEN];     /* small number of fds for open images */
+		int next_file_handle_cache_slot;
+
+		// Cache a number of inums / directory path pairs (protected by cache_lock)
+		LOGICAL_INUM_CACHE inum_cache[LOGICAL_INUM_CACHE_LEN];
+
+    } IMG_LOGICAL_INFO;
+
+	extern TSK_IMG_INFO *logical_open(int a_num_img,
+		const TSK_TCHAR * const a_images[], unsigned int a_ssize);
+
+	extern void
+		clear_inum_cache_entry(IMG_LOGICAL_INFO *logical_img_info, int index);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tsk/img/pool.hpp b/tsk/img/pool.hpp
index 17827c0c31d96114fa78705ef55baaf15a72cdb0..4d5ea6f9ab54ea02d768eb835f617e68b30e5a5e 100644
--- a/tsk/img/pool.hpp
+++ b/tsk/img/pool.hpp
@@ -15,7 +15,6 @@
 #define _POOL_H
 
 #include "../pool/tsk_pool.h"
-#include "../fs/tsk_apfs.hpp"
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,6 +26,8 @@ extern "C" {
         const TSK_POOL_INFO *pool_info;
         TSK_DADDR_T pvol_block;
 
+        void *impl;
+
     } IMG_POOL_INFO;
 
 #ifdef __cplusplus
diff --git a/tsk/img/raw.c b/tsk/img/raw.c
index 2b92ab6173575eff511034902c3bc52da715e0fa..2c42e54e1099d95e9bcfec02f57a887e22b4cd54 100755
--- a/tsk/img/raw.c
+++ b/tsk/img/raw.c
@@ -17,6 +17,7 @@
 
 #include "tsk_img_i.h"
 #include "raw.h"
+#include "tsk/util/file_system_utils.h"
 
 #ifdef __APPLE__
 #include <sys/disk.h>
@@ -39,19 +40,7 @@
 #define S_IFDIR __S_IFDIR
 #endif
 
-/**
- * \internal
- * Test if the image is a Windows device
- * @param The path to test
- *
- * Return 1 if the path represents a Windows device, 0 otherwise
- */
-#ifdef TSK_WIN32
-static int
-is_windows_device_path(const TSK_TCHAR * image_name) {
-    return (TSTRNCMP(image_name, _TSK_T("\\\\.\\"), 4) == 0);
-}
-#endif
+
 
 /** 
  * \internal
@@ -475,173 +464,7 @@ raw_close(TSK_IMG_INFO * img_info)
 }
 
 
-/**
- * Get the size in bytes of the given file.
- *
- * @param a_file The file to test
- * @param is_winobj 1 if the file is a windows object and not a real file
- *
- * @return the size in bytes, or -1 on error/unknown,
- *         -2 if unreadable, -3 if it's a directory.
- */
-static TSK_OFF_T
-get_size(const TSK_TCHAR * a_file, uint8_t a_is_winobj)
-{
-    TSK_OFF_T size = -1;
-    struct STAT_STR sb;
-
-    if (TSTAT(a_file, &sb) < 0) {
-        if (a_is_winobj) {
-            /* stat can fail for Windows objects; ignore that */
-            if (tsk_verbose) {
-                tsk_fprintf(stderr,
-                    "raw_open: ignoring stat result on Windows device %"
-                    PRIttocTSK "\n", a_file);
-            }
-        }
-        else {
-            tsk_error_reset();
-            tsk_error_set_errno(TSK_ERR_IMG_STAT);
-            tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK
-                "\" - %s", a_file, strerror(errno));
-            return -2;
-        }
-    }
-    else if ((sb.st_mode & S_IFMT) == S_IFDIR) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_IMG_MAGIC);
-        tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK
-            "\" - is a directory", a_file);
-        return -3;
-    }
-
-#ifdef TSK_WIN32
-    {
-        HANDLE fd;
-        DWORD dwHi, dwLo;
-
-        if ((fd = CreateFile(a_file, FILE_READ_DATA,
-                    FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, 
-                    OPEN_EXISTING, 0, NULL)) ==
-            INVALID_HANDLE_VALUE) {
-            int lastError = (int)GetLastError();
-            tsk_error_reset();
-            tsk_error_set_errno(TSK_ERR_IMG_OPEN);
-            // print string of commonly found errors
-            if (lastError == ERROR_ACCESS_DENIED) {
-                tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                    "\" - access denied", a_file);
-            }
-            else if (lastError == ERROR_SHARING_VIOLATION) {
-                tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                    "\" - sharing violation", a_file);
-            }
-            else if (lastError == ERROR_FILE_NOT_FOUND) {
-                tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                    "\" - file not found", a_file);
-            }
-            else {
-                tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                    "\" - (error %d)", a_file, lastError);
-            }
-            return -2;
-        }
-
-        /* We need different techniques to determine the size of Windows physical
-         * devices versus normal files */
-        if (a_is_winobj == 0) {
-            dwLo = GetFileSize(fd, &dwHi);
-            if (dwLo == 0xffffffff) {
-                int lastError = (int)GetLastError();
-                tsk_error_reset();
-                tsk_error_set_errno(TSK_ERR_IMG_OPEN);
-                tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                    "\" - GetFileSize: %d", a_file, lastError);
-                size = -1;
-            }
-            else {
-                size = dwLo | ((TSK_OFF_T) dwHi << 32);
-            }
-        }
-        else {
-            
-            //use GET_PARTITION_INFO_EX prior to IOCTL_DISK_GET_DRIVE_GEOMETRY
-            // to determine the physical disk size because
-            //calculating it with the help of GET_DRIVE_GEOMETRY gives only
-            // approximate number
-            DWORD junk;
-            
-            PARTITION_INFORMATION_EX partition;
-            if (FALSE == DeviceIoControl(fd,
-                IOCTL_DISK_GET_PARTITION_INFO_EX,
-                NULL, 0, &partition, sizeof(partition), &junk,
-                (LPOVERLAPPED)NULL) )  {
-                DISK_GEOMETRY pdg;
-
-                if (FALSE == DeviceIoControl(fd, IOCTL_DISK_GET_DRIVE_GEOMETRY,
-                        NULL, 0, &pdg, sizeof(pdg), &junk, (LPOVERLAPPED) NULL)) {
-                    int lastError = (int)GetLastError();
-                    tsk_error_reset();
-                    tsk_error_set_errno(TSK_ERR_IMG_OPEN);
-                    tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
-                        "\" - DeviceIoControl: %d", a_file,
-                        lastError);
-                    size = -1;
-                }
-                else {
-                    size = pdg.Cylinders.QuadPart *
-                        (TSK_OFF_T) pdg.TracksPerCylinder *
-                        (TSK_OFF_T) pdg.SectorsPerTrack *
-                        (TSK_OFF_T) pdg.BytesPerSector;
-                }
-            }
-            else {
-                size = partition.PartitionLength.QuadPart;
-            }
-        }
-
-        CloseHandle(fd);
-    }
-#else
 
-    int fd;
-
-    if ((fd = open(a_file, O_RDONLY | O_BINARY)) < 0) {
-        tsk_error_reset();
-        tsk_error_set_errno(TSK_ERR_IMG_OPEN);
-        tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - %s",
-            a_file, strerror(errno));
-        return -2;
-    }
-
-#ifdef __APPLE__
-    /* OS X doesn't support SEEK_END on char devices */
-    if ((sb.st_mode & S_IFMT) != S_IFCHR) {
-        size = lseek(fd, 0, SEEK_END);
-    }
-
-    if (size <= 0) {
-        int blkSize;
-        long long blkCnt;
-
-        if (ioctl(fd, DKIOCGETBLOCKSIZE, &blkSize) >= 0) {
-            if (ioctl(fd, DKIOCGETBLOCKCOUNT, &blkCnt) >= 0) {
-                size = blkCnt * (long long) blkSize;
-            }
-        }
-    }
-#else
-    /* We don't use the stat output because it doesn't work on raw
-     * devices and such */
-    size = lseek(fd, 0, SEEK_END);
-#endif
-
-    close(fd);
-
-#endif
-
-    return size;
-}
 
 #ifdef TSK_WIN32
 /**
@@ -800,7 +623,7 @@ raw_open(int a_num_img, const TSK_TCHAR * const a_images[],
 #endif
 
     /* Check that the first image file exists and is not a directory */
-    first_seg_size = get_size(a_images[0], raw_info->is_winobj);
+    first_seg_size = get_size_of_file_on_disk(a_images[0], raw_info->is_winobj);
     if (first_seg_size < -1) {
         tsk_img_free(raw_info);
         return NULL;
@@ -919,7 +742,7 @@ raw_open(int a_num_img, const TSK_TCHAR * const a_images[],
     for (i = 1; i < raw_info->img_info.num_img; i++) {
         TSK_OFF_T size;
         raw_info->cptr[i] = -1;
-        size = get_size(raw_info->img_info.images[i], raw_info->is_winobj);
+        size = get_size_of_file_on_disk(raw_info->img_info.images[i], raw_info->is_winobj);
         if (size < 0) {
             if (size == -1) {
                 if (tsk_verbose) {
diff --git a/tsk/img/tsk_img.h b/tsk/img/tsk_img.h
index 4c41c92b13292c8e0e2b067f6bd9026150e062cb..ebd37562edd43dd4a038ae3ca97415ee13359ba7 100644
--- a/tsk/img/tsk_img.h
+++ b/tsk/img/tsk_img.h
@@ -46,6 +46,13 @@ extern "C" {
 #define TSK_IMG_TYPE_ISEWF(t) \
     ((((t) & TSK_IMG_TYPE_EWF_EWF))?1:0)
 
+	 /**
+	 * \ingroup imglib
+	 * Macro that takes a image type and returns 1 if the type
+	 * is for a logical directory file format. */
+#define TSK_IMG_TYPE_ISDIR(t) \
+    ((((t) & TSK_IMG_TYPE_LOGICAL))?1:0)
+
 
     /**
      * Flag values for the disk image format type.  Each type has a
@@ -69,6 +76,7 @@ extern "C" {
         TSK_IMG_TYPE_VHD_VHD = 0x0100,   ///< VHD version
         TSK_IMG_TYPE_EXTERNAL = 0x1000,  ///< external defined format which at least implements TSK_IMG_INFO, used by pytsk
         TSK_IMG_TYPE_POOL = 0x4000,      ///< Pool
+		TSK_IMG_TYPE_LOGICAL = 0x8000,       ///< Logical directory
 
         TSK_IMG_TYPE_UNSUPP = 0xffff   ///< Unsupported disk image type
     } TSK_IMG_TYPE_ENUM;
diff --git a/tsk/pool/Makefile.am b/tsk/pool/Makefile.am
index eb5e48ba05f5255f34b9fe4f0b8d59b0405225e0..5fd34cbebead3411fbeb81bea416ec9a19d2f6cd 100644
--- a/tsk/pool/Makefile.am
+++ b/tsk/pool/Makefile.am
@@ -4,7 +4,9 @@ EXTRA_DIST = .indent.pro
 
 noinst_LTLIBRARIES = libtskpool.la
 libtskpool_la_SOURCES = pool_open.cpp pool_read.cpp pool_types.cpp \
-	apfs_pool_compat.cpp apfs_pool.cpp
+	apfs_pool_compat.cpp apfs_pool.cpp \
+	img_bfio_handle.c img_bfio_handle.h \
+	lvm_pool_compat.cpp lvm_pool.cpp
 
 indent:
 	indent *.c *.cpp *.h *.hpp
diff --git a/tsk/pool/apfs_pool_compat.cpp b/tsk/pool/apfs_pool_compat.cpp
index a77cef65f0be1f2e9f011a486b72fab6cb42791d..2b1d16acab19c036a26941da644c01639c1f3ffa 100755
--- a/tsk/pool/apfs_pool_compat.cpp
+++ b/tsk/pool/apfs_pool_compat.cpp
@@ -185,7 +185,7 @@ uint8_t APFSPoolCompat::poolstat(FILE *hFile) const noexcept try {
                                    vol.changed() % 1000000000, time_buf));
 
     const auto unmount_log = vol.unmount_log();
-    if (unmount_log.size() != 0) {
+    if (!unmount_log.empty()) {
       tsk_fprintf(hFile, "|\n");
       tsk_fprintf(hFile, "|   Unmount Logs\n");
       tsk_fprintf(hFile, "|   ------------\n");
diff --git a/tsk/pool/img_bfio_handle.c b/tsk/pool/img_bfio_handle.c
new file mode 100644
index 0000000000000000000000000000000000000000..58b67c0a8092553475a67b6c8946c225443068fd
--- /dev/null
+++ b/tsk/pool/img_bfio_handle.c
@@ -0,0 +1,255 @@
+/*
+ * The Sleuth Kit - Image BFIO handle
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#include "tsk/base/tsk_base_i.h"
+
+#ifdef HAVE_LIBBFIO
+
+#include "img_bfio_handle.h"
+
+#include <libbfio.h>
+
+#include "tsk/img/tsk_img.h"
+
+/* Initializes the image BFIO handle
+ * Returns 1 if successful or -1 on error
+ */
+int img_bfio_handle_initialize(
+     libbfio_handle_t **handle,
+     TSK_IMG_INFO *image,
+     TSK_OFF_T offset,
+     libbfio_error_t **error )
+{
+	img_bfio_handle_t *img_bfio_handle = NULL;
+
+	img_bfio_handle = (img_bfio_handle_t *) tsk_malloc( sizeof( img_bfio_handle_t ) );
+
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	img_bfio_handle->image = image;
+	img_bfio_handle->base_offset = offset;
+	img_bfio_handle->logical_offset = 0;
+	img_bfio_handle->access_flags = LIBBFIO_ACCESS_FLAG_READ;
+
+	if( libbfio_handle_initialize(
+	     handle,
+	     (intptr_t *) img_bfio_handle,
+	     (int (*)(intptr_t **, libbfio_error_t **)) img_bfio_handle_free,
+	     NULL,
+	     (int (*)(intptr_t *, int, libbfio_error_t **)) img_bfio_handle_open,
+	     (int (*)(intptr_t *, libbfio_error_t **)) img_bfio_handle_close,
+	     (ssize_t (*)(intptr_t *, uint8_t *, size_t, libbfio_error_t **)) img_bfio_handle_read,
+	     NULL,
+	     (off64_t (*)(intptr_t *, off64_t, int, libbfio_error_t **)) img_bfio_handle_seek_offset,
+	     (int (*)(intptr_t *, libbfio_error_t **)) img_bfio_handle_exists,
+	     (int (*)(intptr_t *, libbfio_error_t **)) img_bfio_handle_is_open,
+	     (int (*)(intptr_t *, size64_t *, libbfio_error_t **)) img_bfio_handle_get_size,
+	     LIBBFIO_FLAG_IO_HANDLE_MANAGED | LIBBFIO_FLAG_IO_HANDLE_CLONE_BY_FUNCTION,
+	     error ) != 1 )
+	{
+		free(
+		 img_bfio_handle );
+
+		return( -1 );
+	}
+	return( 1 );
+}
+
+/* Frees an image BFIO handle
+ * Returns 1 if successful or -1 on error
+ */
+int img_bfio_handle_free(
+     img_bfio_handle_t **img_bfio_handle,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( *img_bfio_handle != NULL )
+	{
+		free(
+		 *img_bfio_handle );
+
+		*img_bfio_handle = NULL;
+	}
+	return( 1 );
+}
+
+/* Opens the image BFIO handle
+ * Returns 1 if successful or -1 on error
+ */
+int img_bfio_handle_open(
+     img_bfio_handle_t *img_bfio_handle,
+     int access_flags,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( img_bfio_handle->image == NULL )
+	{
+		return( -1 );
+	}
+	if( ( ( access_flags & LIBBFIO_ACCESS_FLAG_READ ) != 0 )
+	 && ( ( access_flags & LIBBFIO_ACCESS_FLAG_WRITE ) != 0 ) )
+	{
+		return( -1 );
+	}
+	if( ( access_flags & LIBBFIO_ACCESS_FLAG_WRITE ) != 0 )
+	{
+		return( -1 );
+	}
+	/* No need to do anything here, because the file object is already open
+	 */
+	img_bfio_handle->access_flags = access_flags;
+
+	return( 1 );
+}
+
+/* Closes the image BFIO handle
+ * Returns 0 if successful or -1 on error
+ */
+int img_bfio_handle_close(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( img_bfio_handle->image == NULL )
+	{
+		return( -1 );
+	}
+	/* Do not close the image, have Sleuthkit deal with it
+	 */
+	img_bfio_handle->access_flags = 0;
+
+	return( 0 );
+}
+
+/* Reads a buffer from the image BFIO handle
+ * Returns the number of bytes read if successful, or -1 on error
+ */
+ssize_t img_bfio_handle_read(
+         img_bfio_handle_t *img_bfio_handle,
+         uint8_t *buffer,
+         size_t size,
+         libbfio_error_t **error )
+{
+	ssize_t read_count = 0;
+
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	read_count = tsk_img_read(
+	              img_bfio_handle->image,
+	              img_bfio_handle->base_offset + img_bfio_handle->logical_offset,
+	              (char *) buffer,
+	              size );
+
+	if( read_count == -1 )
+	{
+		return( -1 );
+	}
+	return( read_count );
+}
+
+/* Seeks a certain offset within the image BFIO handle
+ * Returns the offset if the seek is successful or -1 on error
+ */
+off64_t img_bfio_handle_seek_offset(
+         img_bfio_handle_t *img_bfio_handle,
+         off64_t offset,
+         int whence,
+         libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+/* TODO add support for SEEK_CUR and SEEK_END */
+	if( whence != SEEK_SET )
+	{
+		return( -1 );
+	}
+	img_bfio_handle->logical_offset = offset;
+
+	return( offset );
+}
+
+/* Function to determine if a file exists
+ * Returns 1 if file exists, 0 if not or -1 on error
+ */
+int img_bfio_handle_exists(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( img_bfio_handle->image == NULL )
+	{
+		return( 0 );
+	}
+	return( 1 );
+}
+
+/* Check if the file is open
+ * Returns 1 if open, 0 if not or -1 on error
+ */
+int img_bfio_handle_is_open(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( img_bfio_handle->image == NULL )
+	{
+		return( -1 );
+	}
+	/* As far as BFIO is concerned the file object is always open
+	 */
+	return( 1 );
+}
+
+/* Retrieves the file size
+ * Returns 1 if successful or -1 on error
+ */
+int img_bfio_handle_get_size(
+     img_bfio_handle_t *img_bfio_handle,
+     size64_t *size,
+     libbfio_error_t **error )
+{
+	if( img_bfio_handle == NULL )
+	{
+		return( -1 );
+	}
+	if( img_bfio_handle->image == NULL )
+	{
+		return( -1 );
+	}
+	if( size == NULL )
+	{
+		return( -1 );
+	}
+	*size = img_bfio_handle->image->size;
+
+	return( 1 );
+}
+
+#endif /* HAVE_LIBBFIO */
+
diff --git a/tsk/pool/img_bfio_handle.h b/tsk/pool/img_bfio_handle.h
new file mode 100644
index 0000000000000000000000000000000000000000..842d155eea226604c80cc4d8bac784e4c9f3e309
--- /dev/null
+++ b/tsk/pool/img_bfio_handle.h
@@ -0,0 +1,90 @@
+/*
+ * The Sleuth Kit - Image BFIO handle
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#if !defined( _IMG_BFIO_HANDLE_H )
+#define _IMG_BFIO_HANDLE_H
+
+#include "tsk/base/tsk_base_i.h"
+
+#ifdef HAVE_LIBBFIO
+
+#include <libbfio.h>
+
+#include "tsk/img/tsk_img.h"
+
+#if defined( __cplusplus )
+extern "C" {
+#endif
+
+typedef struct img_bfio_handle img_bfio_handle_t;
+
+struct img_bfio_handle
+{
+	TSK_IMG_INFO *image;
+	TSK_OFF_T base_offset;
+	TSK_OFF_T logical_offset;
+	int access_flags;
+};
+
+int img_bfio_handle_initialize(
+     libbfio_handle_t **handle,
+     TSK_IMG_INFO *image,
+     TSK_OFF_T offset,
+     libbfio_error_t **error );
+
+int img_bfio_handle_free(
+     img_bfio_handle_t **img_bfio_handle,
+     libbfio_error_t **error );
+
+int img_bfio_handle_clone(
+     img_bfio_handle_t **destination_img_bfio_handle,
+     img_bfio_handle_t *source_img_bfio_handle,
+     libbfio_error_t **error );
+
+int img_bfio_handle_open(
+     img_bfio_handle_t *img_bfio_handle,
+     int access_flags,
+     libbfio_error_t **error );
+
+int img_bfio_handle_close(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error );
+
+ssize_t img_bfio_handle_read(
+         img_bfio_handle_t *img_bfio_handle,
+         uint8_t *buffer,
+         size_t size,
+         libbfio_error_t **error );
+
+off64_t img_bfio_handle_seek_offset(
+         img_bfio_handle_t *img_bfio_handle,
+         off64_t offset,
+         int whence,
+         libbfio_error_t **error );
+
+int img_bfio_handle_exists(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error );
+
+int img_bfio_handle_is_open(
+     img_bfio_handle_t *img_bfio_handle,
+     libbfio_error_t **error );
+
+int img_bfio_handle_get_size(
+     img_bfio_handle_t *img_bfio_handle,
+     size64_t *size,
+     libbfio_error_t **error );
+
+#if defined( __cplusplus )
+}
+#endif
+
+#endif /* HAVE_LIBBFIO */
+
+#endif /* !defined( _IMG_BFIO_HANDLE_H ) */
+
diff --git a/tsk/pool/lvm_pool.cpp b/tsk/pool/lvm_pool.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..308e72ac19810472bf0f0a2ea027cb9ab658f3b0
--- /dev/null
+++ b/tsk/pool/lvm_pool.cpp
@@ -0,0 +1,122 @@
+/*
+ * The Sleuth Kit - Add on for Linux LVM support
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#include "tsk/base/tsk_base_i.h"
+
+#if HAVE_LIBVSLVM
+
+#include "img_bfio_handle.h"
+#include "tsk_lvm.hpp"
+
+#include "tsk/auto/guid.h"
+
+#include <stdexcept>
+#include <tuple>
+
+#include <libbfio.h>
+#include <libvslvm.h>
+
+#if !defined( LIBVSLVM_HAVE_BFIO )
+
+LIBVSLVM_EXTERN \
+int libvslvm_handle_open_file_io_handle(
+     libvslvm_handle_t *handle,
+     libbfio_handle_t *file_io_handle,
+     int access_flags,
+     libvslvm_error_t **error );
+
+LIBVSLVM_EXTERN \
+int libvslvm_handle_open_physical_volume_files_file_io_pool(
+     libvslvm_handle_t *handle,
+     libbfio_pool_t *file_io_pool,
+     libcerror_error_t **error );
+
+#endif /* !defined( LIBVSLVM_HAVE_BFIO ) */
+
+LVMPool::LVMPool(std::vector<img_t>&& imgs)
+    : TSKPool(std::forward<std::vector<img_t>>(imgs)) {
+  if (_members.size() != 1) {
+    throw std::runtime_error(
+        "Only single physical volume LVM pools are currently supported");
+  }
+  std::tie(_img, _offset) = _members[0];
+
+  libbfio_handle_t *file_io_handle = NULL;
+  int file_io_pool_entry =  0;
+
+  if (img_bfio_handle_initialize(&file_io_handle, _img, _offset, NULL) != 1) {
+    throw std::runtime_error("Unable to initialize image BFIO handle");
+  }
+  if (libbfio_pool_initialize(&( _file_io_pool ), 0, LIBBFIO_POOL_UNLIMITED_NUMBER_OF_OPEN_HANDLES, NULL) != 1) {
+    libbfio_handle_free(&file_io_handle, NULL);
+    throw std::runtime_error("Unable to initialize BFIO pool");
+  }
+  if (libbfio_pool_append_handle(_file_io_pool, &file_io_pool_entry, file_io_handle, LIBBFIO_OPEN_READ, NULL) != 1) {
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    libbfio_handle_free(&file_io_handle, NULL);
+    throw std::runtime_error("Unable to add image BFIO handle to BFIO pool");
+  }
+  if (libvslvm_handle_initialize(&( _lvm_handle ), NULL) != 1) {
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    throw std::runtime_error("Unable to initialize LVM handle");
+  }
+  if (libvslvm_handle_open_file_io_handle(_lvm_handle, file_io_handle, LIBVSLVM_OPEN_READ, NULL) != 1) {
+    libvslvm_handle_free(&( _lvm_handle ), NULL);
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    throw std::runtime_error("Unable to open LVM handle");
+  }
+  if (libvslvm_handle_open_physical_volume_files_file_io_pool(_lvm_handle, _file_io_pool, NULL) != 1) {
+    libvslvm_handle_free(&( _lvm_handle ), NULL);
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    throw std::runtime_error("Unable to open LVM handle");
+  }
+  if (libvslvm_handle_get_volume_group(_lvm_handle, &( _lvm_volume_group ), NULL) != 1) {
+    libvslvm_handle_free(&( _lvm_handle ), NULL);
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    throw std::runtime_error("Unable to retrieve LVM volume group");
+  }
+  if (tsk_verbose) {
+    tsk_fprintf(stderr, "LVMPool: retrieved LVM volume group.\n" );
+  }
+  char identifier_string[ 64 ];
+
+  if (libvslvm_volume_group_get_identifier(_lvm_volume_group, identifier_string, 64, NULL) != 1) {
+    libvslvm_volume_group_free(&( _lvm_volume_group ), NULL);
+    libvslvm_handle_free(&( _lvm_handle ), NULL);
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+    throw std::runtime_error("Unable to retrieve LVM volume group identifier");
+  }
+  identifier = std::string(identifier_string);
+
+  _block_size = 0;
+  _dev_block_size = _img->sector_size;
+  _num_blocks = 0;
+
+  _num_vols = 0;
+}
+
+LVMPool::~LVMPool() {
+  if (_lvm_volume_group != nullptr) {
+    libvslvm_volume_group_free(&( _lvm_volume_group ), NULL);
+  }
+  if (_lvm_handle != nullptr) {
+    libvslvm_handle_free(&( _lvm_handle ), NULL);
+  }
+  if (_file_io_pool != nullptr) {
+    libbfio_pool_free(&( _file_io_pool ), NULL);
+  }
+}
+
+ssize_t LVMPool::read(uint64_t address, char* buf, size_t buf_size) const
+    noexcept {
+  // TODO implement; this function appears to be only used by the JNI bindings
+  return tsk_img_read(_img, address + _offset, buf, buf_size);
+}
+
+#endif /* HAVE_LIBVSLVM */
+
diff --git a/tsk/pool/lvm_pool_compat.cpp b/tsk/pool/lvm_pool_compat.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..85cedb7d72a42dd02110beb73ea31aae9f5c974b
--- /dev/null
+++ b/tsk/pool/lvm_pool_compat.cpp
@@ -0,0 +1,179 @@
+/*
+ * The Sleuth Kit - Add on for Linux LVM support
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#include "tsk/base/tsk_base_i.h"
+
+#if HAVE_LIBVSLVM
+
+#include "lvm_pool_compat.hpp"
+
+#include "tsk/img/pool.hpp"
+#include "tsk/img/tsk_img_i.h"
+
+#include <stdexcept>
+
+/**
+ * Get error string from libvslvm and make buffer empty if that didn't work. 
+ * @returns 1 if error message was not set
+ */
+static uint8_t getError(libvslvm_error_t *vslvm_error, char error_string[512])
+{
+    error_string[0] = '\0';
+    int retval = libvslvm_error_backtrace_sprint(vslvm_error, error_string, 512);
+    return retval <= 0;
+}
+
+uint8_t LVMPoolCompat::poolstat(FILE *hFile) const noexcept try {
+
+    tsk_fprintf(hFile, "POOL CONTAINER INFORMATION\n");
+    tsk_fprintf(hFile, "--------------------------------------------\n\n");
+    tsk_fprintf(hFile, "Volume group %s\n", identifier.c_str());
+    tsk_fprintf(hFile, "==============================================\n");
+    tsk_fprintf(hFile, "Type: LVM\n");
+
+    int number_of_logical_volumes = 0;
+    if (libvslvm_volume_group_get_number_of_logical_volumes(_lvm_volume_group, &number_of_logical_volumes, NULL) != 1 ) {
+        return 1;
+    }
+    libvslvm_logical_volume_t *lvm_logical_volume = NULL;
+    char volume_name[ 64 ];
+    char volume_identifier[ 64 ];
+
+    for (int volume_index = 0; volume_index < number_of_logical_volumes; volume_index++ ) {
+        if (libvslvm_volume_group_get_logical_volume(_lvm_volume_group, volume_index, &lvm_logical_volume, NULL) != 1 ) {
+            return 1;
+        }
+        if (libvslvm_logical_volume_get_identifier(lvm_logical_volume, volume_identifier, 64, NULL) != 1 ) {
+            return 1;
+        }
+        if (libvslvm_logical_volume_get_name(lvm_logical_volume, volume_name, 64, NULL) != 1 ) {
+            return 1;
+        }
+        if (libvslvm_logical_volume_free(&lvm_logical_volume, NULL) != 1 ) {
+            return 1;
+        }
+        tsk_fprintf(hFile, "|\n");
+        tsk_fprintf(hFile, "+-> Volume %s\n", volume_identifier);
+        tsk_fprintf(hFile, "|   ===========================================\n");
+        tsk_fprintf(hFile, "|   Name: %s\n", volume_name);
+    }
+    return 0;
+} catch (const std::exception &e) {
+    tsk_error_reset();
+    tsk_error_set_errno(TSK_ERR_POOL_GENPOOL);
+    tsk_error_set_errstr("%s", e.what());
+    return 1;
+}
+
+static void
+lvm_logical_volume_img_close(TSK_IMG_INFO * img_info)
+{
+    if (img_info != NULL) {
+        IMG_POOL_INFO *pool_img_info = (IMG_POOL_INFO *)img_info;
+        libvslvm_logical_volume_free((libvslvm_logical_volume_t **) &( pool_img_info->impl ), NULL);
+
+        tsk_deinit_lock(&(img_info->cache_lock));
+        tsk_img_free(img_info);
+    }
+}
+
+static void
+lvm_logical_volume_img_imgstat(TSK_IMG_INFO * img_info, FILE *hFile)
+{
+    tsk_fprintf(hFile, "IMAGE FILE INFORMATION\n");
+    tsk_fprintf(hFile, "--------------------------------------------\n");
+    tsk_fprintf(hFile, "Image Type:\t\tLVM logical volume\n");
+    tsk_fprintf(hFile, "\nSize of data in bytes:\t%" PRIdOFF "\n",
+        img_info->size);
+}
+
+static ssize_t
+lvm_logical_volume_img_read(TSK_IMG_INFO * img_info, TSK_OFF_T offset, char *buf, size_t len)
+{
+    IMG_POOL_INFO *pool_img_info = (IMG_POOL_INFO *)img_info;
+    libvslvm_error_t *vslvm_error = NULL;
+
+    // correct the offset to be relative to the start of the logical volume
+    offset -= pool_img_info->pool_info->img_offset;
+
+    if (tsk_verbose) {
+        tsk_fprintf(stderr, "lvm_logical_volume_img_read: offset: %" PRIdOFF " read len: %" PRIuSIZE ".\n",
+          offset, len);
+    }
+    if ((offset < 0) || (offset > img_info->size)) {
+        return 0;
+    }
+    ssize_t read_count = libvslvm_logical_volume_read_buffer_at_offset((libvslvm_logical_volume_t *) pool_img_info->impl, buf, len, offset, &vslvm_error);
+
+    if (read_count == -1) {
+        char error_string[521];
+        getError(vslvm_error, error_string);
+        tsk_fprintf(stderr, "lvm_logical_volume_img_read: %s\n", error_string);
+    }
+    return read_count;
+}
+
+TSK_IMG_INFO * LVMPoolCompat::getImageInfo(const TSK_POOL_INFO *pool_info, TSK_DADDR_T pvol_block) noexcept try {
+
+    libvslvm_logical_volume_t *lvm_logical_volume = NULL;
+
+    // pvol_block contains the logical volume index + 1
+    if (libvslvm_volume_group_get_logical_volume(_lvm_volume_group, pvol_block - 1, &lvm_logical_volume, NULL) != 1 ) {
+        return NULL;
+    }
+    uint64_t logical_volume_size = 0;
+
+    if (libvslvm_logical_volume_get_size(lvm_logical_volume, &logical_volume_size, NULL) != 1 ) {
+        return NULL;
+    }
+    IMG_POOL_INFO *img_pool_info = (IMG_POOL_INFO *)tsk_img_malloc(sizeof(IMG_POOL_INFO));
+
+    if (img_pool_info == NULL) {
+        return NULL;
+    }
+    img_pool_info->pool_info = pool_info;
+    img_pool_info->pvol_block = pvol_block;
+
+    img_pool_info->img_info.read = lvm_logical_volume_img_read;
+    img_pool_info->img_info.close = lvm_logical_volume_img_close;
+    img_pool_info->img_info.imgstat = lvm_logical_volume_img_imgstat;
+
+    img_pool_info->impl = (void *) lvm_logical_volume;
+
+    TSK_IMG_INFO *img_info = (TSK_IMG_INFO *)img_pool_info;
+
+    img_info->tag = TSK_IMG_INFO_TAG;
+    img_info->itype = TSK_IMG_TYPE_POOL;
+
+    // Copy original info from the first TSK_IMG_INFO. There was a check in the
+    // LVMPool that _members has only one entry.
+    IMG_POOL_INFO *pool_img_info = (IMG_POOL_INFO *)img_info;
+    const auto pool = static_cast<LVMPoolCompat*>(pool_img_info->pool_info->impl);
+    TSK_IMG_INFO *origInfo = pool->_members[0].first;
+
+    img_info->size = logical_volume_size;
+    img_info->num_img = origInfo->num_img;
+    img_info->sector_size = origInfo->sector_size;
+    img_info->page_size = origInfo->page_size;
+    img_info->spare_size = origInfo->spare_size;
+    img_info->images = origInfo->images;
+
+    tsk_init_lock(&(img_info->cache_lock));
+
+    return img_info;
+
+}
+catch (const std::exception &e) {
+    tsk_error_reset();
+    tsk_error_set_errno(TSK_ERR_POOL_GENPOOL);
+    tsk_error_set_errstr("%s", e.what());
+    return NULL;
+}
+
+#endif /* HAVE_LIBVSLVM */
+
diff --git a/tsk/pool/lvm_pool_compat.hpp b/tsk/pool/lvm_pool_compat.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e9aad68974a6884f60a9928487d345d45f563f6e
--- /dev/null
+++ b/tsk/pool/lvm_pool_compat.hpp
@@ -0,0 +1,30 @@
+/*
+ * The Sleuth Kit - Add on for Linux LVM support
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#pragma once
+
+#include "tsk/base/tsk_base_i.h"
+
+#if HAVE_LIBVSLVM
+
+#include "pool_compat.hpp"
+#include "tsk_lvm.hpp"
+
+class LVMPoolCompat : public TSKPoolCompat<LVMPool> {
+ public:
+  template <typename... Args>
+  LVMPoolCompat(Args&&... args)
+      : TSKPoolCompat<LVMPool>(TSK_POOL_TYPE_LVM, std::forward<Args>(args)...) { }
+
+  uint8_t poolstat(FILE* hFile) const noexcept;
+  TSK_IMG_INFO * getImageInfo(const TSK_POOL_INFO *pool_info, TSK_DADDR_T pvol_block) noexcept;
+};
+
+#endif /* HAVE_LIBVSLVM */
+
diff --git a/tsk/pool/pool_compat.hpp b/tsk/pool/pool_compat.hpp
index 63029c6855739aad613663099f3af8ade98c7903..e310db5abff7e046d79fa260bab6f4cf46e4d219 100644
--- a/tsk/pool/pool_compat.hpp
+++ b/tsk/pool/pool_compat.hpp
@@ -16,6 +16,7 @@
 
 template <typename T,
           typename = std::enable_if_t<std::is_base_of<TSKPool, T>::value>>
+
 class TSKPoolCompat : public T {
  protected:
   TSK_POOL_INFO _info{};
@@ -30,6 +31,8 @@ class TSKPoolCompat : public T {
 
  public:
   template <typename... Args>
+
+
   TSKPoolCompat(TSK_POOL_TYPE_ENUM type, Args &&... args) noexcept(
       std::is_nothrow_constructible<T, Args...>::value)
       : T(std::forward<Args>(args)...) {
diff --git a/tsk/pool/pool_open.cpp b/tsk/pool/pool_open.cpp
index 4de226b76d28224e73b28e8d10a68cb6677835ee..9aae5e8d90f5cfdf6c53fec3b77945481d5f9d07 100755
--- a/tsk/pool/pool_open.cpp
+++ b/tsk/pool/pool_open.cpp
@@ -3,16 +3,19 @@
  *
  * Brian Carrier [carrier <at> sleuthkit [dot] org]
  * Copyright (c) 2018-2019 BlackBag Technologies.  All Rights reserved
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
  *
  * This software is distributed under the Common Public License 1.0
  */
-#include "../fs/tsk_apfs.hpp"
+#include "tsk/base/tsk_base_i.h"
 
 #include "apfs_pool_compat.hpp"
+#include "lvm_pool_compat.hpp"
 #include "pool_compat.hpp"
 
-#include "../img/tsk_img.h"
-#include "../vs/tsk_vs.h"
+#include "tsk/fs/tsk_apfs.hpp"
+#include "tsk/img/tsk_img.h"
+#include "tsk/vs/tsk_vs.h"
 
 const TSK_POOL_INFO *tsk_pool_open_sing(const TSK_VS_PART_INFO *part,
                                         TSK_POOL_TYPE_ENUM type) {
@@ -35,6 +38,13 @@ const TSK_POOL_INFO *tsk_pool_open_sing(const TSK_VS_PART_INFO *part,
   return tsk_pool_open_img_sing(part->vs->img_info, offset, type);
 }
 
+
+/** 
+ * @param num_vols Number of volumes in parts array
+ * @param parts List of Volume partitions to review
+ * @param type Type of pool to open (or auto detect)
+ * @returns Pool structure
+ */
 const TSK_POOL_INFO *tsk_pool_open(int num_vols,
                                    const TSK_VS_PART_INFO *const parts[],
                                    TSK_POOL_TYPE_ENUM type) {
@@ -52,6 +62,7 @@ const TSK_POOL_INFO *tsk_pool_open(int num_vols,
     return nullptr;
   }
 
+  // Make arrays of equal size to store the volume offset and IMG_INFO
   auto imgs = std::make_unique<TSK_IMG_INFO *[]>(num_vols);
   auto offsets = std::make_unique<TSK_OFF_T[]>(num_vols);
 
@@ -73,37 +84,69 @@ const TSK_POOL_INFO *tsk_pool_open(int num_vols,
   return tsk_pool_open_img(num_vols, imgs.get(), offsets.get(), type);
 }
 
+/**
+ * Open a pool at the given offset in the given image.
+ */
 const TSK_POOL_INFO *tsk_pool_open_img_sing(TSK_IMG_INFO *img, TSK_OFF_T offset,
                                             TSK_POOL_TYPE_ENUM type) {
   return tsk_pool_open_img(1, &img, &offset, type);
 }
 
+
+/**
+ * Open a pool at the set of image offsets
+ * @param num_imgs Size of imgs array
+ * @param imgs List of IMG_INFO to look for pool
+ * @param offsets List of offsets to look for pool in the img at the same array index
+ * @param type Pool type to open
+ */
 const TSK_POOL_INFO *tsk_pool_open_img(int num_imgs, TSK_IMG_INFO *const imgs[],
                                        const TSK_OFF_T offsets[],
                                        TSK_POOL_TYPE_ENUM type) {
-  std::vector<APFSPool::img_t> v{};
-  v.reserve(num_imgs);
+  std::vector<APFSPool::img_t> apfs_v{};
+  apfs_v.reserve(num_imgs);
+
+  for (auto i = 0; i < num_imgs; i++) {
+    apfs_v.emplace_back(imgs[i], offsets[i]);
+  }
+#ifdef HAVE_LIBVSLVM
+  std::vector<LVMPool::img_t> lvm_v{};
+
+  lvm_v.reserve(num_imgs);
 
   for (auto i = 0; i < num_imgs; i++) {
-    v.emplace_back(imgs[i], offsets[i]);
+    lvm_v.emplace_back(imgs[i], offsets[i]);
   }
+#endif
+
+  const char *error_string = NULL;
 
   switch (type) {
     case TSK_POOL_TYPE_DETECT:
       try {
-        auto apfs = new APFSPoolCompat(std::move(v), APFS_POOL_NX_BLOCK_LATEST);
+        auto apfs = new APFSPoolCompat(std::move(apfs_v), APFS_POOL_NX_BLOCK_LATEST);
 
         return &apfs->pool_info();
       } catch (std::runtime_error &e) {
-        if (tsk_verbose) {
-          tsk_fprintf(stderr, "tsk_pool_open_img: APFS check failed: %s\n",
-                      e.what());
-        }
+        error_string = e.what();
+      }
+#ifdef HAVE_LIBVSLVM
+      try {
+        auto lvm = new LVMPoolCompat(std::move(lvm_v));
+
+        return &lvm->pool_info();
+      } catch (std::runtime_error &e) {
+        error_string = e.what();
+      }
+#endif
+      if (tsk_verbose) {
+        tsk_fprintf(stderr, "tsk_pool_open_img: pool type detection failed: %s\n",
+                    error_string);
       }
       break;
     case TSK_POOL_TYPE_APFS:
       try {
-        auto apfs = new APFSPoolCompat(std::move(v), APFS_POOL_NX_BLOCK_LATEST);
+        auto apfs = new APFSPoolCompat(std::move(apfs_v), APFS_POOL_NX_BLOCK_LATEST);
 
         return &apfs->pool_info();
       } catch (std::runtime_error &e) {
@@ -112,6 +155,21 @@ const TSK_POOL_INFO *tsk_pool_open_img(int num_imgs, TSK_IMG_INFO *const imgs[],
         tsk_error_set_errstr("%s", e.what());
       }
       return nullptr;
+
+    // Will fallthrough to TSK_POOL_TYPE_UNSUPP if libvslvm is not available.
+    case TSK_POOL_TYPE_LVM:
+#ifdef HAVE_LIBVSLVM
+      try {
+        auto lvm = new LVMPoolCompat(std::move(lvm_v));
+
+        return &lvm->pool_info();
+      } catch (std::runtime_error &e) {
+        tsk_error_reset();
+        tsk_error_set_errno(TSK_ERR_POOL_UNKTYPE);
+        tsk_error_set_errstr("%s", e.what());
+      }
+      return nullptr;
+#endif
     case TSK_POOL_TYPE_UNSUPP:
       // All other pool types are unsupported
       tsk_error_reset();
diff --git a/tsk/pool/pool_types.cpp b/tsk/pool/pool_types.cpp
index f4d3b93ad9bec77de2db3e178c09b2da703db1a4..19bd8a5de037cb028123cc8a4efbe3cf20d85a80 100644
--- a/tsk/pool/pool_types.cpp
+++ b/tsk/pool/pool_types.cpp
@@ -22,6 +22,7 @@ struct POOL_TYPES {
 static const POOL_TYPES pool_type_table[]{
     {"auto", TSK_POOL_TYPE_DETECT, "auto-detect"},
     {"apfs", TSK_POOL_TYPE_APFS, "APFS container"},
+    {"lvm", TSK_POOL_TYPE_LVM, "Linux LVM volume group"},
 };
 
 /**
diff --git a/tsk/pool/tsk_lvm.hpp b/tsk/pool/tsk_lvm.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3dceb646eae86f67d51fa71e17be12bbbfb98519
--- /dev/null
+++ b/tsk/pool/tsk_lvm.hpp
@@ -0,0 +1,51 @@
+/*
+ * The Sleuth Kit - Add on for Linux LVM support
+ *
+ * Copyright (c) 2022 Joachim Metz <joachim.metz@gmail.com>
+ *
+ * This software is distributed under the Common Public License 1.0
+ */
+
+#pragma once
+
+#include "tsk/base/tsk_base_i.h"
+
+#if HAVE_LIBVSLVM
+
+#include "tsk_pool.hpp"
+
+#include <libbfio.h>
+#include <libvslvm.h>
+
+class LVMPool;
+
+class LVMPool : public TSKPool {
+ protected:
+  TSK_IMG_INFO *_img;
+  // Start of the pool data within the image
+  TSK_OFF_T _offset;
+  libbfio_pool_t *_file_io_pool = NULL;
+  libvslvm_handle_t *_lvm_handle = NULL;
+  libvslvm_volume_group_t *_lvm_volume_group = NULL;
+
+ public:
+  LVMPool(std::vector<img_t> &&imgs);
+
+  // Moveable
+  LVMPool(LVMPool &&) = default;
+  LVMPool &operator=(LVMPool &&) = default;
+
+  // Not copyable because of TSK_IMG_INFO pointer
+  LVMPool(const LVMPool &) = delete;
+  LVMPool &operator=(const LVMPool &) = delete;
+
+  ~LVMPool();
+
+  std::string identifier;
+
+  ssize_t read(uint64_t address, char *buf, size_t buf_size) const
+      noexcept final;
+};
+
+#endif /* HAVE_LIBVSLVM */
+
diff --git a/tsk/pool/tsk_pool.h b/tsk/pool/tsk_pool.h
index 5c4bd0de2ce3cf422d8f22cf983b0e136f946ac5..e80c191be952d7c1f87b635b90321c2f783cb153 100644
--- a/tsk/pool/tsk_pool.h
+++ b/tsk/pool/tsk_pool.h
@@ -25,6 +25,7 @@ typedef struct TSK_FS_ATTR_RUN TSK_FS_ATTR_RUN;
 typedef enum {
   TSK_POOL_TYPE_DETECT = 0x0000,  ///< Use autodetection methods
   TSK_POOL_TYPE_APFS = 0x0001,    ///< APFS Pooled Volumes
+  TSK_POOL_TYPE_LVM = 0x0002,    ///< Linux LVM volume group
   TSK_POOL_TYPE_UNSUPP = 0xffff,  ///< Unsupported pool container type
 } TSK_POOL_TYPE_ENUM;
 
diff --git a/tsk/pool/tsk_pool.hpp b/tsk/pool/tsk_pool.hpp
index 26f30fbc959e30db988bf3d1e87a61eff4e0cbc4..7380e06779d66665a8437428401d66a01d0f3ef9 100644
--- a/tsk/pool/tsk_pool.hpp
+++ b/tsk/pool/tsk_pool.hpp
@@ -38,13 +38,13 @@ class TSKPool {
 
   virtual ~TSKPool() = default;
 
-  inline const Guid &uuid() const { return _uuid; }
+  inline const TSKGuid &uuid() const { return _uuid; }
 
   inline uint32_t block_size() const noexcept { return _block_size; }
   inline uint32_t dev_block_size() const noexcept { return _dev_block_size; }
   inline uint64_t num_blocks() const noexcept { return _num_blocks; }
   inline uint64_t first_img_offset() const noexcept {
-      if (_members.size() >= 1) {
+      if (!_members.empty()) {
           return _members[0].second;
       }
       return 0;
@@ -67,7 +67,7 @@ class TSKPool {
   TSKPool(std::vector<img_t> &&imgs) noexcept : _members{std::move(imgs)} {}
   
   std::vector<img_t> _members{};
-  Guid _uuid{};
+  TSKGuid _uuid{};
   uint64_t _num_blocks;
   int _num_vols;
   uint32_t _block_size{};
diff --git a/tsk/tsk.pc.in b/tsk/tsk.pc.in
new file mode 100644
index 0000000000000000000000000000000000000000..fb0f35fee70e5fd3eee637e713a875d91dac8e7e
--- /dev/null
+++ b/tsk/tsk.pc.in
@@ -0,0 +1,15 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+includedir=@includedir@
+libdir=@libdir@
+
+Name: @PACKAGE_NAME@
+Description: An open source forensic toolkit
+URL: http://www.sleuthkit.org/sleuthkit
+Version: @PACKAGE_VERSION@
+
+Cflags: -I${includedir}
+Libs: -L${libdir} -ltsk
+Libs.private: @PACKAGE_LIBS_PRIVATE@
+Requires: @AX_PACKAGE_REQUIRES@
+Requires.private: @AX_PACKAGE_REQUIRES_PRIVATE@ 
diff --git a/tsk/tsk_config.h.in b/tsk/tsk_config.h.in
index 337d9514372b9d963034011bf4dd7315b568c0d8..332929169fed4a305e54880eb760b9835da09dc1 100644
--- a/tsk/tsk_config.h.in
+++ b/tsk/tsk_config.h.in
@@ -1,21 +1,15 @@
 /* tsk/tsk_config.h.in.  Generated from configure.ac by autoheader.  */
 
-/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP
-   systems. This function is required for `alloca.c' support on those systems.
-   */
-#undef CRAY_STACKSEG_END
-
-/* Define to 1 if using `alloca.c'. */
+/* Define to 1 if using 'alloca.c'. */
 #undef C_ALLOCA
 
 /* Define to 1 if you have the <afflib/afflib.h> header file. */
 #undef HAVE_AFFLIB_AFFLIB_H
 
-/* Define to 1 if you have `alloca', as a function or macro. */
+/* Define to 1 if you have 'alloca', as a function or macro. */
 #undef HAVE_ALLOCA
 
-/* Define to 1 if you have <alloca.h> and it should be used (not on Ultrix).
-   */
+/* Define to 1 if <alloca.h> works. */
 #undef HAVE_ALLOCA_H
 
 /* define if the compiler supports basic C++14 syntax */
@@ -54,6 +48,12 @@
 /* Define to 1 if you have the `afflib' library (-lafflib). */
 #undef HAVE_LIBAFFLIB
 
+/* Define to 1 if you have the `bfio' library (-lbfio). */
+#undef HAVE_LIBBFIO
+
+/* Define to 1 if you have the <libbfio.h> header file. */
+#undef HAVE_LIBBFIO_H
+
 /* Define to 1 if you have the `dl' library (-ldl). */
 #undef HAVE_LIBDL
 
@@ -63,9 +63,6 @@
 /* Define to 1 if you have the <libewf.h> header file. */
 #undef HAVE_LIBEWF_H
 
-/* Define if using opensll */
-#undef HAVE_LIBOPENSSL
-
 /* Define to 1 if you have the `sqlite3' library (-lsqlite3). */
 #undef HAVE_LIBSQLITE3
 
@@ -84,6 +81,12 @@
 /* Define to 1 if you have the <libvmdk.h> header file. */
 #undef HAVE_LIBVMDK_H
 
+/* Define to 1 if you have the `vslvm' library (-lvslvm). */
+#undef HAVE_LIBVSLVM
+
+/* Define to 1 if you have the <libvslvm.h> header file. */
+#undef HAVE_LIBVSLVM_H
+
 /* Define to 1 if you have the `z' library (-lz). */
 #undef HAVE_LIBZ
 
@@ -97,9 +100,6 @@
 /* Define to 1 if you have the <map> header file. */
 #undef HAVE_MAP
 
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
 /* Define if you have POSIX threads libraries and header files. */
 #undef HAVE_PTHREAD
 
@@ -121,6 +121,9 @@
 /* Define to 1 if you have the <stdint.h> header file. */
 #undef HAVE_STDINT_H
 
+/* Define to 1 if you have the <stdio.h> header file. */
+#undef HAVE_STDIO_H
+
 /* Define to 1 if you have the <stdlib.h> header file. */
 #undef HAVE_STDLIB_H
 
@@ -239,17 +242,14 @@
 	STACK_DIRECTION = 0 => direction of growth unknown */
 #undef STACK_DIRECTION
 
-/* Define to 1 if you have the ANSI C header files. */
+/* Define to 1 if all of the C90 standard headers exist (not just the ones
+   required in a freestanding environment). This macro is provided for
+   backward compatibility; new code need not use it. */
 #undef STDC_HEADERS
 
 /* Version number of package */
 #undef VERSION
 
-/* Enable large inode numbers on Mac OS X 10.5.  */
-#ifndef _DARWIN_USE_64_BIT_INODE
-# define _DARWIN_USE_64_BIT_INODE 1
-#endif
-
 /* Number of bits in a file offset, on hosts where this is settable. */
 #undef _FILE_OFFSET_BITS
 
diff --git a/tsk/util/Makefile.am b/tsk/util/Makefile.am
index b425e042f575d00a8c89e4c59dfa9e8e8bfc7adb..e95d25f852f9649c645df77ea9dd8773ad52e0d6 100644
--- a/tsk/util/Makefile.am
+++ b/tsk/util/Makefile.am
@@ -3,7 +3,7 @@ AM_CXXFLAGS = -Wall -Wextra -Werror
 EXTRA_DIST = .indent.pro 
 
 noinst_LTLIBRARIES = libtskutil.la
-libtskutil_la_SOURCES = crypto.cpp detect_encryption.c
+libtskutil_la_SOURCES = crypto.cpp detect_encryption.c file_system_utils.c
 
 indent:
 	indent *.c *.cpp *.h *.hpp
diff --git a/tsk/util/detect_encryption.c b/tsk/util/detect_encryption.c
index 9e64cbcaac5996059ec05c7eb0a4dc18261d3e62..f50db1549da8f3eff0cb283fdd60b72008e2ca72 100644
--- a/tsk/util/detect_encryption.c
+++ b/tsk/util/detect_encryption.c
@@ -121,11 +121,11 @@ calculateEntropy(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) {
             break;
         }
 
-        if (tsk_img_read(img_info, offset + i * bufLen, buf, bufLen) != bufLen) {
+        if (tsk_img_read(img_info, offset + i * bufLen, buf, bufLen) != (ssize_t) bufLen) {
             break;
         }
 
-        for (int j = 0; j < bufLen; j++) {
+        for (size_t j = 0; j < bufLen; j++) {
             unsigned char b = buf[j] & 0xff;
             byteCounts[b]++;
         }
@@ -176,7 +176,7 @@ detectVolumeEncryption(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) {
     if (buf == NULL) {
         return result;
     }
-    if (tsk_img_read(img_info, offset, buf, len) != len) {
+    if (tsk_img_read(img_info, offset, buf, len) != (ssize_t)len) {
         free(buf);
         return result;
     }
@@ -250,7 +250,7 @@ detectDiskEncryption(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) {
     if (buf == NULL) {
         return result;
     }
-    if (tsk_img_read(img_info, offset, buf, len) != len) {
+    if (tsk_img_read(img_info, offset, buf, len) != (ssize_t)len) {
         free(buf);
         return result;
     }
diff --git a/tsk/util/file_system_utils.c b/tsk/util/file_system_utils.c
new file mode 100644
index 0000000000000000000000000000000000000000..d417882cd3dcdb1d9ce0c3f60c4385254fea2408
--- /dev/null
+++ b/tsk/util/file_system_utils.c
@@ -0,0 +1,219 @@
+
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+/*
+ * Common code used by the raw and logical images.
+ */
+
+#include "tsk/base/tsk_base_i.h"
+#include "tsk/img/tsk_img_i.h"
+#include "file_system_utils.h"
+
+#ifdef __APPLE__
+#include <sys/disk.h>
+#endif
+
+#ifdef TSK_WIN32
+#include <winioctl.h>
+#else
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#endif
+
+#ifndef S_IFMT
+#define S_IFMT __S_IFMT
+#endif
+
+#ifndef S_IFDIR
+#define S_IFDIR __S_IFDIR
+#endif
+
+/**
+* Test if the image is a Windows device
+* @param The path to test
+*
+* Return 1 if the path represents a Windows device, 0 otherwise
+*/
+#ifdef TSK_WIN32
+int is_windows_device_path(const TSK_TCHAR * image_name) {
+	return (TSTRNCMP(image_name, _TSK_T("\\\\.\\"), 4) == 0);
+}
+#endif
+
+/**
+* Get the size in bytes of the given file.
+*
+* @param a_file The file to test
+* @param is_winobj 1 if the file is a windows object and not a real file
+*
+* @return the size in bytes, or -1 on error/unknown,
+*         -2 if unreadable, -3 if it's a directory.
+*/
+TSK_OFF_T
+get_size_of_file_on_disk(const TSK_TCHAR * a_file, uint8_t a_is_winobj)
+{
+	TSK_OFF_T size = -1;
+	struct STAT_STR sb;
+
+	if (TSTAT(a_file, &sb) < 0) {
+		if (a_is_winobj) {
+			/* stat can fail for Windows objects; ignore that */
+			if (tsk_verbose) {
+				tsk_fprintf(stderr,
+					"raw_open: ignoring stat result on Windows device %"
+					PRIttocTSK "\n", a_file);
+			}
+		}
+		else {
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_IMG_STAT);
+			tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK
+				"\" - %s", a_file, strerror(errno));
+			return -2;
+		}
+	}
+	else if ((sb.st_mode & S_IFMT) == S_IFDIR) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_MAGIC);
+		tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK
+			"\" - is a directory", a_file);
+		return -3;
+	}
+
+#ifdef TSK_WIN32
+	{
+		HANDLE fd;
+		DWORD dwHi, dwLo;
+
+		if ((fd = CreateFile(a_file, FILE_READ_DATA,
+			FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
+			OPEN_EXISTING, 0, NULL)) ==
+			INVALID_HANDLE_VALUE) {
+			int lastError = (int)GetLastError();
+			tsk_error_reset();
+			tsk_error_set_errno(TSK_ERR_IMG_OPEN);
+			// print string of commonly found errors
+			if (lastError == ERROR_ACCESS_DENIED) {
+				tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+					"\" - access denied", a_file);
+			}
+			else if (lastError == ERROR_SHARING_VIOLATION) {
+				tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+					"\" - sharing violation", a_file);
+			}
+			else if (lastError == ERROR_FILE_NOT_FOUND) {
+				tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+					"\" - file not found", a_file);
+			}
+			else {
+				tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+					"\" - (error %d)", a_file, lastError);
+			}
+			return -2;
+		}
+
+		/* We need different techniques to determine the size of Windows physical
+		* devices versus normal files */
+		if (a_is_winobj == 0) {
+			dwLo = GetFileSize(fd, &dwHi);
+			if (dwLo == 0xffffffff) {
+				int lastError = (int)GetLastError();
+				tsk_error_reset();
+				tsk_error_set_errno(TSK_ERR_IMG_OPEN);
+				tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+					"\" - GetFileSize: %d", a_file, lastError);
+				size = -1;
+			}
+			else {
+				size = dwLo | ((TSK_OFF_T)dwHi << 32);
+			}
+		}
+		else {
+
+			//use GET_PARTITION_INFO_EX prior to IOCTL_DISK_GET_DRIVE_GEOMETRY
+			// to determine the physical disk size because
+			//calculating it with the help of GET_DRIVE_GEOMETRY gives only
+			// approximate number
+			DWORD junk;
+
+			PARTITION_INFORMATION_EX partition;
+			if (FALSE == DeviceIoControl(fd,
+				IOCTL_DISK_GET_PARTITION_INFO_EX,
+				NULL, 0, &partition, sizeof(partition), &junk,
+				(LPOVERLAPPED)NULL)) {
+				DISK_GEOMETRY pdg;
+
+				if (FALSE == DeviceIoControl(fd, IOCTL_DISK_GET_DRIVE_GEOMETRY,
+					NULL, 0, &pdg, sizeof(pdg), &junk, (LPOVERLAPPED)NULL)) {
+					int lastError = (int)GetLastError();
+					tsk_error_reset();
+					tsk_error_set_errno(TSK_ERR_IMG_OPEN);
+					tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK
+						"\" - DeviceIoControl: %d", a_file,
+						lastError);
+					size = -1;
+				}
+				else {
+					size = pdg.Cylinders.QuadPart *
+						(TSK_OFF_T)pdg.TracksPerCylinder *
+						(TSK_OFF_T)pdg.SectorsPerTrack *
+						(TSK_OFF_T)pdg.BytesPerSector;
+				}
+			}
+			else {
+				size = partition.PartitionLength.QuadPart;
+			}
+		}
+
+		CloseHandle(fd);
+	}
+#else
+
+	int fd;
+
+	if ((fd = open(a_file, O_RDONLY | O_BINARY)) < 0) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_IMG_OPEN);
+		tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - %s",
+			a_file, strerror(errno));
+		return -2;
+	}
+
+#ifdef __APPLE__
+	/* OS X doesn't support SEEK_END on char devices */
+	if ((sb.st_mode & S_IFMT) != S_IFCHR) {
+		size = lseek(fd, 0, SEEK_END);
+	}
+
+	if (size <= 0) {
+		int blkSize;
+		long long blkCnt;
+
+		if (ioctl(fd, DKIOCGETBLOCKSIZE, &blkSize) >= 0) {
+			if (ioctl(fd, DKIOCGETBLOCKCOUNT, &blkCnt) >= 0) {
+				size = blkCnt * (long long)blkSize;
+			}
+		}
+	}
+#else
+	/* We don't use the stat output because it doesn't work on raw
+	* devices and such */
+	size = lseek(fd, 0, SEEK_END);
+#endif
+
+	close(fd);
+
+#endif
+
+	return size;
+}
\ No newline at end of file
diff --git a/tsk/util/file_system_utils.h b/tsk/util/file_system_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3fa619978dde16af1e3e5e3bb033e323955b66b
--- /dev/null
+++ b/tsk/util/file_system_utils.h
@@ -0,0 +1,20 @@
+/*
+** The Sleuth Kit
+**
+** Copyright (c) 2022 Basis Technology Corp.  All rights reserved
+** Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
+**
+** This software is distributed under the Common Public License 1.0
+**
+*/
+
+#ifndef _FILE_SYSTEM_UTILS_H_
+#define _FILE_SYSTEM_UTILS_H_
+
+#ifdef TSK_WIN32
+extern int is_windows_device_path(const TSK_TCHAR * image_name);
+#endif
+
+extern TSK_OFF_T get_size_of_file_on_disk(const TSK_TCHAR * a_file, uint8_t a_is_winobj);
+
+#endif
\ No newline at end of file
diff --git a/tsk/vs/mm_open.c b/tsk/vs/mm_open.c
index 6ac5a6ce8daf774fe24128ea6e9173ab9916fb83..c8c415bcffe8f17894b8b1bf0dd884510769ada6 100644
--- a/tsk/vs/mm_open.c
+++ b/tsk/vs/mm_open.c
@@ -43,6 +43,13 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
         return NULL;
     }
 
+	if (img_info->itype == TSK_IMG_TYPE_LOGICAL) {
+		tsk_error_reset();
+		tsk_error_set_errno(TSK_ERR_VS_UNSUPTYPE);
+		tsk_error_set_errstr("Logical image type can not have a volume system");
+		return NULL;
+	}
+
     /* Autodetect mode 
      * We need to try all of them in case there are multiple 
      * installations
@@ -51,31 +58,32 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
      * will not be reported
      */
     if (type == TSK_VS_TYPE_DETECT) {
-        TSK_VS_INFO *vs, *vs_set = NULL;
-        char *set = NULL;
+        TSK_VS_INFO *vs, *prev_vs = NULL;
+        char *prev_type = NULL;
 
         if ((vs = tsk_vs_dos_open(img_info, offset, 1)) != NULL) {
-            set = "DOS";
-            vs_set = vs;
+            prev_type = "DOS";
+            prev_vs = vs;
         }
         else {
             tsk_error_reset();
         }
+        
         if ((vs = tsk_vs_bsd_open(img_info, offset)) != NULL) {
-            // if (set == NULL) {
+            // if (prev_type == NULL) {
             // In this case, BSD takes priority because BSD partitions start off with
             // the DOS magic value in the first sector with the boot code.
-            set = "BSD";
-            vs_set = vs;
+            prev_type = "BSD";
+            prev_vs = vs;
             /*
                }
                else {
-               vs_set->close(vs_set);
+               prev_vs->close(prev_vs);
                vs->close(vs);
                tsk_error_reset();
                tsk_error_set_errno(TSK_ERR_VS_UNKTYPE);
                tsk_error_set_errstr(
-               "BSD or %s at %" PRIuDADDR, set, offset);
+               "BSD or %s at %" PRIuDADDR, prev_type, offset);
                tsk_errstr2[0] = '\0';
                return NULL;
                }
@@ -84,24 +92,26 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
         else {
             tsk_error_reset();
         }
+        
         if ((vs = tsk_vs_gpt_open(img_info, offset)) != NULL) {
 
-            if ((set != NULL) && (strcmp(set, "DOS") == 0) && (vs->is_backup)) {
+            if ((prev_type != NULL) && (strcmp(prev_type, "DOS") == 0) && (vs->is_backup)) {
                 /* In this case we've found a DOS partition and a backup GPT partition.
-                 * The DOS partition takes priority in this case (and are already in set and vs_set) */
+                 * The DOS partition takes priority in this case (and are already in prev_type and prev_vs) */
                 vs->close(vs);
                 if (tsk_verbose)
                     tsk_fprintf(stderr,
                         "mm_open: Ignoring secondary GPT Partition\n");
             }
             else {
-                if (set != NULL) {
+                if (prev_type != NULL) {
 
                     /* GPT drives have a DOS Safety partition table.
-                     * Test to see if we can ignore one */
-                    if (strcmp(set, "DOS") == 0) {
+                     * Test to see if the GPT has a safety partition
+                     * and then we can ignore the DOS */
+                    if (strcmp(prev_type, "DOS") == 0) {
                         TSK_VS_PART_INFO *tmp_set;
-                        for (tmp_set = vs_set->part_list; tmp_set;
+                        for (tmp_set = prev_vs->part_list; tmp_set;
                             tmp_set = tmp_set->next) {
                             if ((tmp_set->desc)
                                 && (strncmp(tmp_set->desc, "GPT Safety",
@@ -111,25 +121,27 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
                                 if (tsk_verbose)
                                     tsk_fprintf(stderr,
                                         "mm_open: Ignoring DOS Safety GPT Partition\n");
-                                set = NULL;
-                                vs_set = NULL;
+                                prev_type = NULL;
+                                prev_vs->close(prev_vs);
+                                prev_vs = NULL;
                                 break;
                             }
                         }
                     }
 
-                    if (set != NULL) {
-                        vs_set->close(vs_set);
+                    /* If we never found the safety, then we have a conflict. */
+                    if (prev_type != NULL) {
+                        prev_vs->close(prev_vs);
                         vs->close(vs);
                         tsk_error_reset();
-                        tsk_error_set_errno(TSK_ERR_VS_UNKTYPE);
-                        tsk_error_set_errstr("GPT or %s at %" PRIuDADDR, set,
+                        tsk_error_set_errno(TSK_ERR_VS_MULTTYPE);
+                        tsk_error_set_errstr("GPT or %s at %" PRIuDADDR, prev_type,
                             offset);
                         return NULL;
                     }
                 }
-                set = "GPT";
-                vs_set = vs;
+                prev_type = "GPT";
+                prev_vs = vs;
             }
         }
         else {
@@ -137,16 +149,16 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
         }
 
         if ((vs = tsk_vs_sun_open(img_info, offset)) != NULL) {
-            if (set == NULL) {
-                set = "Sun";
-                vs_set = vs;
+            if (prev_type == NULL) {
+                prev_type = "Sun";
+                prev_vs = vs;
             }
             else {
-                vs_set->close(vs_set);
+                prev_vs->close(prev_vs);
                 vs->close(vs);
                 tsk_error_reset();
-                tsk_error_set_errno(TSK_ERR_VS_UNKTYPE);
-                tsk_error_set_errstr("Sun or %s at %" PRIuDADDR, set,
+                tsk_error_set_errno(TSK_ERR_VS_MULTTYPE);
+                tsk_error_set_errstr("Sun or %s at %" PRIuDADDR, prev_type,
                     offset);
                 return NULL;
             }
@@ -156,16 +168,16 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
         }
 
         if ((vs = tsk_vs_mac_open(img_info, offset)) != NULL) {
-            if (set == NULL) {
-                set = "Mac";
-                vs_set = vs;
+            if (prev_type == NULL) {
+                prev_type = "Mac";
+                prev_vs = vs;
             }
             else {
-                vs_set->close(vs_set);
+                prev_vs->close(prev_vs);
                 vs->close(vs);
                 tsk_error_reset();
-                tsk_error_set_errno(TSK_ERR_VS_UNKTYPE);
-                tsk_error_set_errstr("Mac or %s at %" PRIuDADDR, set,
+                tsk_error_set_errno(TSK_ERR_VS_MULTTYPE);
+                tsk_error_set_errstr("Mac or %s at %" PRIuDADDR, prev_type,
                     offset);
                 return NULL;
             }
@@ -174,7 +186,7 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
             tsk_error_reset();
         }
 
-        if (vs_set == NULL) {
+        if (prev_vs == NULL) {
             tsk_error_reset();
 
             // Check whether the volume system appears to be encrypted.
@@ -184,7 +196,7 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
             if (result != NULL) {
                 if (result->encryptionType == ENCRYPTION_DETECTED_SIGNATURE) {
                     tsk_error_set_errno(TSK_ERR_VS_ENCRYPTED);
-                    tsk_error_set_errstr(result->desc);
+                    tsk_error_set_errstr("%s", result->desc);
                 }
                 free(result);
                 result = NULL;
@@ -195,8 +207,10 @@ tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset,
             return NULL;
         }
 
-        return vs_set;
+        return prev_vs;
     }
+    
+    // Not autodetect
     else {
 
         switch (type) {
diff --git a/tsk/vs/mm_part.c b/tsk/vs/mm_part.c
index 911fa049a650f0598b08852eefb2ab042c07ca6b..6dc6b5325b01e69a48f76df8cb4df4e5a698ed54 100644
--- a/tsk/vs/mm_part.c
+++ b/tsk/vs/mm_part.c
@@ -186,8 +186,6 @@ tsk_vs_part_free(TSK_VS_INFO * a_vs)
         part = part2;
     }
     a_vs->part_list = NULL;
-
-    return;
 }
 
 /**
diff --git a/win32/libtsk/libtsk.vcxproj b/win32/libtsk/libtsk.vcxproj
index 55d2da31ea84d05f8d390a971d181887900de517..b730f1c393c17e7ca3620286aa2d6959b3427bdd 100755
--- a/win32/libtsk/libtsk.vcxproj
+++ b/win32/libtsk/libtsk.vcxproj
@@ -310,6 +310,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClCompile Include="..\..\tsk\pool\pool_types.cpp" />
     <ClCompile Include="..\..\tsk\util\crypto.cpp" />
     <ClCompile Include="..\..\tsk\util\detect_encryption.c" />
+	<ClCompile Include="..\..\tsk\util\file_system_utils.c" />
     <ClCompile Include="..\..\tsk\vs\bsd.c" />
     <ClCompile Include="..\..\tsk\vs\dos.c" />
     <ClCompile Include="..\..\tsk\vs\gpt.c" />
@@ -363,6 +364,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClCompile Include="..\..\tsk\fs\unix_misc.c" />
     <ClCompile Include="..\..\tsk\fs\walk_cpp.cpp" />
     <ClCompile Include="..\..\tsk\fs\yaffs.cpp" />
+    <ClCompile Include="..\..\tsk\fs\logical_fs.cpp" />
     <ClCompile Include="..\..\tsk\auto\auto.cpp" />
     <ClCompile Include="..\..\tsk\auto\auto_db.cpp" />
     <ClCompile Include="..\..\tsk\auto\case_db.cpp" />
@@ -396,6 +398,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClCompile Include="..\..\tsk\img\img_types.c" />
     <ClCompile Include="..\..\tsk\img\mult_files.c" />
     <ClCompile Include="..\..\tsk\img\raw.c" />
+    <ClCompile Include="..\..\tsk\img\logical_img.c" />
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\tsk\auto\guid.h" />
@@ -424,6 +427,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClInclude Include="..\..\tsk\pool\tsk_pool.hpp" />
     <ClInclude Include="..\..\tsk\util\crypto.hpp" />
     <ClInclude Include="..\..\tsk\util\detect_encryption.h" />
+    <ClInclude Include="..\..\tsk\util\file_system_utils.h" />
     <ClInclude Include="..\..\tsk\util\lw_shared_ptr.hpp" />
     <ClInclude Include="..\..\tsk\util\span.hpp" />
     <ClInclude Include="..\..\tsk\vs\tsk_bsd.h" />
@@ -442,6 +446,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClInclude Include="..\..\tsk\fs\tsk_iso9660.h" />
     <ClInclude Include="..\..\tsk\fs\tsk_ntfs.h" />
     <ClInclude Include="..\..\tsk\fs\tsk_yaffs.h" />
+    <ClInclude Include="..\..\tsk\fs\tsk_logical_fs.h" />
     <ClInclude Include="..\..\tsk\auto\sqlite3.h" />
     <ClInclude Include="..\..\tsk\auto\tsk_auto.h" />
     <ClInclude Include="..\..\tsk\auto\tsk_auto_i.h" />
@@ -455,6 +460,7 @@ xcopy /E /Y "$(VCInstallDir)\redist\$(PlatformTarget)\Microsoft.VC140.CRT" "$(Ou
     <ClInclude Include="..\..\tsk\img\aff.h" />
     <ClInclude Include="..\..\tsk\img\ewf.h" />
     <ClInclude Include="..\..\tsk\img\raw.h" />
+    <ClInclude Include="..\..\tsk\img\logical_img.h" />
     <ClInclude Include="..\..\tsk\img\tsk_img.h" />
     <ClInclude Include="..\..\tsk\img\tsk_img_i.h" />
   </ItemGroup>
diff --git a/win32/libtsk/libtsk.vcxproj.filters b/win32/libtsk/libtsk.vcxproj.filters
index 1d6e4b3677bfbae933a78575cbfea5ef1a5991db..98cae96115f2417cbd0ad15f05c6d9db8bc03348 100755
--- a/win32/libtsk/libtsk.vcxproj.filters
+++ b/win32/libtsk/libtsk.vcxproj.filters
@@ -243,6 +243,9 @@
     <ClCompile Include="..\..\tsk\img\raw.c">
       <Filter>img</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\tsk\img\logical_img.c">
+      <Filter>img</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\tsk\fs\fatfs_dent.cpp">
       <Filter>fs</Filter>
     </ClCompile>
@@ -282,6 +285,9 @@
     <ClCompile Include="..\..\tsk\fs\yaffs.cpp">
       <Filter>fs</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\tsk\fs\logical_fs.cpp">
+      <Filter>fs</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\tsk\fs\exfatfs_dent.c">
       <Filter>fs</Filter>
     </ClCompile>
@@ -366,6 +372,9 @@
     <ClCompile Include="..\..\tsk\util\detect_encryption.c">
       <Filter>util</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\tsk\util\file_system_utils.c">
+      <Filter>util</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\tsk\img\unsupported_types.c">
       <Filter>img</Filter>
     </ClCompile>
@@ -455,6 +464,9 @@
     <ClInclude Include="..\..\tsk\img\raw.h">
       <Filter>img</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\tsk\img\logical_img.h">
+      <Filter>img</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\tsk\img\tsk_img.h">
       <Filter>img</Filter>
     </ClInclude>
@@ -470,6 +482,9 @@
     <ClInclude Include="..\..\tsk\fs\tsk_yaffs.h">
       <Filter>fs</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\tsk\fs\tsk_logical_fs.h">
+      <Filter>fs</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\tsk\hashdb\tsk_hash_info.h">
       <Filter>hash</Filter>
     </ClInclude>
@@ -543,6 +558,9 @@
     <ClInclude Include="..\..\tsk\util\detect_encryption.h">
       <Filter>util</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\tsk\util\file_system_utils.h">
+      <Filter>util</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\tsk\img\unsupported_types.h">
       <Filter>img</Filter>
     </ClInclude>