diff --git a/NEWS.txt b/NEWS.txt
index 332e57dfa118522ef8606f34458d19409f50e1e1..1f349eb6481501b85979a11d7bf388ca6e0dbff7 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -1,10 +1,10 @@
 ---------------- VERSION 4.10.1 --------------
-
-NEED TO FINISH
-
 C/C++:
-- Changed Windows build to use Nuget for libewf, libvmdk, libvhdi
-- Fixed compiler warnings and updated license files
+- Changed Windows build to use Nuget for libewf, libvmdk, libvhdi
+- Fixed compiler warnings
+- Clarified licenses and added Apache license to distribution
+- Improved error handling for out-of-memory issues
+- Fixed memory leaks in Rejistry++
 
 Java:
 - Localized for Japanese
diff --git a/bindings/java/build.xml b/bindings/java/build.xml
index e0abb64d1a8256af7cb722c41957c231a170f577..98adf33507ab8d5ca7729f47de8db4b4a31a6edc 100644
--- a/bindings/java/build.xml
+++ b/bindings/java/build.xml
@@ -121,6 +121,7 @@
 	<target name="compile-test" depends="compile" description="compile the tests">
 		<javac debug="on" srcdir="${test}" destdir="${build}" includeantruntime="false">
 			<classpath refid="libraries"/>
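+			<!-- Enable javac lint warnings when compiling the tests -->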
+			<compilerarg value="-Xlint" />
 		</javac>
 	</target>
 
diff --git a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
index 4fefb10606bb1f8cb59694f35222527d843212a3..5b1aaa05fd99b552781a4e35e92474933ba5cf7d 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/CommunicationsManager.java
@@ -18,6 +18,7 @@
  */
 package org.sleuthkit.datamodel;
 
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -35,6 +36,7 @@
 import java.util.logging.Logger;
 import org.sleuthkit.datamodel.Blackboard.BlackboardException;
 import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
+import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
 import static org.sleuthkit.datamodel.SleuthkitCase.closeResultSet;
 import static org.sleuthkit.datamodel.SleuthkitCase.closeStatement;
 
@@ -364,7 +366,7 @@ public void addRelationships(AccountFileInstance sender, List<AccountFileInstanc
 		 * correctly.
 		 */
 		// Currently we do not save the direction of communication
-		List<Long> accountIDs = new ArrayList<Long>();
+		List<Long> accountIDs = new ArrayList<>();
 
 		if (null != sender) {
 			accountIDs.add(sender.getAccount().getAccountID());
@@ -381,17 +383,50 @@ public void addRelationships(AccountFileInstance sender, List<AccountFileInstanc
 						+ "Recipient source ID" + recipient.getDataSourceObjectID() + " != relationship source ID" + sourceArtifact.getDataSourceObjectID());
 			}
 		}
+		
+		// Set up the query for the prepared statement
+		String query = "INTO account_relationships (account1_id, account2_id, relationship_source_obj_id, date_time, relationship_type, data_source_obj_id  ) "
+						+ "VALUES (?,?,?,?,?,?)";
+		switch (db.getDatabaseType()) {
+			case POSTGRESQL:
+				query = "INSERT " + query + " ON CONFLICT DO NOTHING";
+				break;
+			case SQLITE:
+				query = "INSERT OR IGNORE " + query;
+				break;
+			default:
+				throw new TskCoreException("Unknown DB Type: " + db.getDatabaseType().name());
+		}		
+		
+		CaseDbTransaction trans = db.beginTransaction();	
+		try {
+			SleuthkitCase.CaseDbConnection connection = trans.getConnection();
+			PreparedStatement preparedStatement = connection.getPreparedStatement(query, Statement.NO_GENERATED_KEYS);
+			
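+			// Insert one row per unordered pair of accounts; duplicate relationships are skipped by the DB-specific INSERT clause above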
+			for (int i = 0; i < accountIDs.size(); i++) {
+				for (int j = i + 1; j < accountIDs.size(); j++) {
+					long account1_id = accountIDs.get(i);
+					long account2_id = accountIDs.get(j);
+
+					preparedStatement.clearParameters();
+					preparedStatement.setLong(1, account1_id);
+					preparedStatement.setLong(2, account2_id);
+					preparedStatement.setLong(3, sourceArtifact.getId());
+					if (dateTime > 0) {
+						preparedStatement.setLong(4, dateTime);
+					} else {
+						preparedStatement.setNull(4, java.sql.Types.BIGINT);
+					}
+					preparedStatement.setInt(5, relationshipType.getTypeID());
+					preparedStatement.setLong(6, sourceArtifact.getDataSourceObjectID());
 
-		for (int i = 0; i < accountIDs.size(); i++) {
-			for (int j = i + 1; j < accountIDs.size(); j++) {
-				try {
-					addAccountsRelationship(accountIDs.get(i), accountIDs.get(j),
-							sourceArtifact, relationshipType, dateTime);
-				} catch (TskCoreException ex) {
-					// @@@ This should probably not be caught and instead we stop adding
-					LOGGER.log(Level.WARNING, "Error adding relationship", ex); //NON-NLS
+					connection.executeUpdate(preparedStatement);
 				}
 			}
+			trans.commit();
+		} catch (SQLException ex) {
+			trans.rollback();
+			throw new TskCoreException("Error adding accounts relationship", ex);
 		}
 	}
 
@@ -586,55 +621,6 @@ public org.sleuthkit.datamodel.Account.Type getAccountType(String accountTypeNam
 		}
 	}
 
-	/**
-	 * Add a row in account relationships table.
-	 *
-	 * @param account1_id           account_id for account1
-	 * @param account2_id           account_id for account2
-	 * @param relationshipaArtifact relationship artifact
-	 * @param relationshipType      The type of relationship to be created
-	 * @param dateTime              datetime of communication/relationship as
-	 *                              epoch seconds
-	 *
-	 * @throws TskCoreException exception thrown if a critical error occurs
-	 *                          within TSK core
-	 */
-	private void addAccountsRelationship(long account1_id, long account2_id, BlackboardArtifact relationshipaArtifact, Relationship.Type relationshipType, long dateTime) throws TskCoreException {
-		CaseDbConnection connection = db.getConnection();
-		db.acquireSingleUserCaseWriteLock();
-		Statement s = null;
-		ResultSet rs = null;
-
-		try {
-			String dateTimeValStr = (dateTime > 0) ? Long.toString(dateTime) : "NULL";
-
-			connection.beginTransaction();
-			s = connection.createStatement();
-			String query = "INTO account_relationships (account1_id, account2_id, relationship_source_obj_id, date_time, relationship_type, data_source_obj_id  ) "
-					+ "VALUES ( " + account1_id + ", " + account2_id + ", " + relationshipaArtifact.getId() + ", " + dateTimeValStr + ", " + relationshipType.getTypeID() + ", " + relationshipaArtifact.getDataSourceObjectID() + ")";
-			switch (db.getDatabaseType()) {
-				case POSTGRESQL:
-					query = "INSERT " + query + " ON CONFLICT DO NOTHING";
-					break;
-				case SQLITE:
-					query = "INSERT OR IGNORE " + query;
-					break;
-				default:
-					throw new TskCoreException("Unknown DB Type: " + db.getDatabaseType().name());
-			}
-			s.execute(query); //NON-NLS
-			connection.commitTransaction();
-		} catch (SQLException ex) {
-			connection.rollbackTransaction();
-			throw new TskCoreException("Error adding accounts relationship", ex);
-		} finally {
-			closeResultSet(rs);
-			closeStatement(s);
-			connection.close();
-			db.releaseSingleUserCaseWriteLock();
-		}
-	}
-
 	/**
 	 * Returns a list of AccountDeviceInstances that have at least one
 	 * relationship that meets the criteria listed in the filters.
diff --git a/bindings/java/src/org/sleuthkit/datamodel/EncodedFileOutputStream.java b/bindings/java/src/org/sleuthkit/datamodel/EncodedFileOutputStream.java
index f181b936f0dcf888cb92ede4d87c201aa03b522e..7c0183e1dd8682bee1eee1d9a2960b7f5014ad2f 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/EncodedFileOutputStream.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/EncodedFileOutputStream.java
@@ -30,7 +30,8 @@
  */
 public class EncodedFileOutputStream extends BufferedOutputStream {
 
-	private TskData.EncodingType type;
+	private final TskData.EncodingType type;
+	private long encodedDataLength;
 
 	/**
 	 * Create an encoded output stream using the specified encoding.
@@ -43,6 +44,7 @@ public class EncodedFileOutputStream extends BufferedOutputStream {
 	public EncodedFileOutputStream(OutputStream out, TskData.EncodingType type) throws IOException {
 		super(out);
 		this.type = type;
+		encodedDataLength = 0;
 		writeHeader();
 	}
 
@@ -65,11 +67,13 @@ public EncodedFileOutputStream(OutputStream out, int size, TskData.EncodingType
 	private void writeHeader() throws IOException {
 		// We get the encoded header here so it will be in plaintext after encoding
 		write(EncodedFileUtil.getEncodedHeader(type), 0, EncodedFileUtil.getHeaderLength());
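+		// The write() call above counted the header bytes; subtract them so getBytesWritten() reports only content bytes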
+		encodedDataLength -= EncodedFileUtil.getHeaderLength();
 	}
 
 	@Override
 	public void write(int b) throws IOException {
 		super.write((int) EncodedFileUtil.encodeByte((byte) b, type));
+		encodedDataLength++;
 	}
 
 	@Override
@@ -83,5 +87,17 @@ public void write(byte[] b,
 		}
 
 		super.write(encodedData, off, len);
+		encodedDataLength += len;
 	}
-}
+	
+	/**
+	 * Get the number of bytes written to the file, excluding header bytes.
+	 * This is needed for storing the original length of the file in the
+	 * tsk_files table in cases where we don't know the size in advance.
+	 * 
+	 * @return the number of bytes written to the stream, excluding the header.
+	 */
+	public long getBytesWritten() {
+		return encodedDataLength;
+	} 
+}
\ No newline at end of file
diff --git a/bindings/java/src/org/sleuthkit/datamodel/HashUtility.java b/bindings/java/src/org/sleuthkit/datamodel/HashUtility.java
index d8618b0ee606a08b9875d2a7f0e06dba818f62b9..4ce6e192eea18ad4fcb1b32c5ed5ff4056071073 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/HashUtility.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/HashUtility.java
@@ -68,6 +68,11 @@ static public List<HashResult> calculateHashes(Content content, Collection<HashT
 			} catch (TskCoreException ex) {
 				throw new TskCoreException("Error reading data at address " + i * BUFFER_SIZE + " from content with ID: " + content.getId(), ex);
 			}
+			
+			// Check for EOF
+			if (read == -1) {
+				break;
+			}
 
 			// Only update with the read bytes.
 			if (read == BUFFER_SIZE) {
@@ -228,4 +233,4 @@ static public String calculateMd5Hash(Content content) throws IOException {
 			throw new IOException(ex);
 		}
 	}	
-}
+}
\ No newline at end of file
diff --git a/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java b/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
index 85e2ed066716a9dbfa139091b4b35c74179c1481..e32295740ddd1ff2560f137671305b4bd9056625 100644
--- a/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
+++ b/bindings/java/src/org/sleuthkit/datamodel/TimelineFilter.java
@@ -655,7 +655,8 @@ public boolean equals(Object obj) {
 			if (notEqual(this.fileTypesFilter, other.getFileTypesFilter())) {
 				return false;
 			}
-			return Objects.equals(this.additionalFilters, other.getSubFilters());
+
+			return Objects.equals(this.additionalFilters, new HashSet<>(other.getSubFilters()));
 		}
 
 	}
diff --git a/bindings/java/test/org/sleuthkit/datamodel/DataModelTestSuite.java b/bindings/java/test/org/sleuthkit/datamodel/DataModelTestSuite.java
index 82c9593f9c251b62a799d19c685142356988ae98..0c22fbd617a741331104c709b3e782f44893c71d 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/DataModelTestSuite.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/DataModelTestSuite.java
@@ -41,7 +41,18 @@
  * default ant target sets properties for the various folders.
  */
 @RunWith(Suite.class)
-@Suite.SuiteClasses({ CommunicationsManagerTest.class, CaseDbSchemaVersionNumberTest.class,org.sleuthkit.datamodel.TopDownTraversal.class, org.sleuthkit.datamodel.SequentialTraversal.class, org.sleuthkit.datamodel.CrossCompare.class, org.sleuthkit.datamodel.BottomUpTest.class, org.sleuthkit.datamodel.CPPtoJavaCompare.class, org.sleuthkit.datamodel.HashDbTest.class})
+@Suite.SuiteClasses({ 
+	CommunicationsManagerTest.class, 
+	CaseDbSchemaVersionNumberTest.class,
+
+//  Note: these tests depend on images being placed in the input folder: nps-2009-canon2-gen6, ntfs1-gen, and small2
+//	org.sleuthkit.datamodel.TopDownTraversal.class, 
+//	org.sleuthkit.datamodel.SequentialTraversal.class, 
+//	org.sleuthkit.datamodel.CrossCompare.class, 
+//	org.sleuthkit.datamodel.BottomUpTest.class, 
+//	org.sleuthkit.datamodel.CPPtoJavaCompare.class, 
+//	org.sleuthkit.datamodel.HashDbTest.class
+})
 public class DataModelTestSuite {
 
 	static final String TEST_IMAGE_DIR_NAME = "test" + java.io.File.separator + "Input";
diff --git a/bindings/java/test/org/sleuthkit/datamodel/PublicTagName.java b/bindings/java/test/org/sleuthkit/datamodel/PublicTagName.java
index c14d0258ea941cca85e996fdb41f549514673d65..9a777aedd702246d7aa92515389927681fe9932d 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/PublicTagName.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/PublicTagName.java
@@ -7,8 +7,8 @@
  */
 public class PublicTagName extends TagName {
 
-	public PublicTagName(long id, String displayName, String description, HTML_COLOR color, TskData.FileKnown knownStatus) {
-		super(id, displayName, description, color, knownStatus);
+	public PublicTagName(long id, String displayName, String description, HTML_COLOR color, TskData.FileKnown knownStatus, long tagSetId, int rank) {
+		super(id, displayName, description, color, knownStatus, tagSetId, rank);
 	}
 
 	@Override
diff --git a/bindings/java/test/org/sleuthkit/datamodel/timeline/EventTypeFilterTest.java b/bindings/java/test/org/sleuthkit/datamodel/timeline/EventTypeFilterTest.java
index c4b700ba5516a3fcb65ad8094d6d8ed99b307cce..c42b5d5dcc2db66ebcf018ba6f78d3625bb4e264 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/timeline/EventTypeFilterTest.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/timeline/EventTypeFilterTest.java
@@ -51,7 +51,7 @@ public void testGetDisplayName() {
 		EventTypeFilter instance = new EventTypeFilter(TimelineEventType.EMAIL);
 		assertEquals(TimelineEventType.EMAIL.getDisplayName(), instance.getDisplayName());
 		instance = new EventTypeFilter(TimelineEventType.ROOT_EVENT_TYPE);
-		assertEquals("Event Type", instance.getDisplayName());
+		assertEquals("Limit event types to", instance.getDisplayName());
 	}
 
 	/**
diff --git a/bindings/java/test/org/sleuthkit/datamodel/timeline/RootFilterTest.java b/bindings/java/test/org/sleuthkit/datamodel/timeline/RootFilterTest.java
index c675d39c05fcc2403dc3cf0be2fe85d03d26b2ff..15513fa7f0f3c85a3b9cc3d0046228a9e1a3a0c3 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/timeline/RootFilterTest.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/timeline/RootFilterTest.java
@@ -40,13 +40,25 @@ public class RootFilterTest {
 	@Test
 	public void testCopyOf() {
 		System.out.println("copyOf");
-		TimelineFilter instance = getNewRootFilter();
-		assertEquals(instance, instance.copyOf());
+		
+		testCopyOfEqual(new TimelineFilter.TagsFilter());
+		testCopyOfEqual(new TimelineFilter.HashHitsFilter());
+		testCopyOfEqual(new TimelineFilter.TextFilter());
+		testCopyOfEqual(new EventTypeFilter(TimelineEventType.ROOT_EVENT_TYPE));
+		testCopyOfEqual(new TimelineFilter.DataSourcesFilter());
+		testCopyOfEqual(new TimelineFilter.HideKnownFilter());
+		testCopyOfEqual(new TimelineFilter.FileTypesFilter());
+		
+		testCopyOfEqual(getNewRootFilter());
 	}
+	
+	private void testCopyOfEqual(TimelineFilter filter) {
+		assertEquals(filter, filter.copyOf());
+	}
+	
 
 	TimelineFilter.RootFilter getNewRootFilter() {
 		TimelineFilter.TagsFilter tagsFilter = new TimelineFilter.TagsFilter();
-		tagsFilter.addSubFilter(new TimelineFilter.TagNameFilter(new PublicTagName(0, "test tagName", "test tag name description", TagName.HTML_COLOR.NONE, TskData.FileKnown.KNOWN)));
 		TimelineFilter.HashHitsFilter hashHitsFilter = new TimelineFilter.HashHitsFilter();
 		TimelineFilter.TextFilter textFilter = new TimelineFilter.TextFilter();
 		EventTypeFilter eventTypeFilter = new EventTypeFilter(TimelineEventType.ROOT_EVENT_TYPE);
diff --git a/bindings/java/test/org/sleuthkit/datamodel/timeline/TimelineTestSuite.java b/bindings/java/test/org/sleuthkit/datamodel/timeline/TimelineTestSuite.java
index 4879a4d70a1c305ae92b3e4ba002d7aedb149794..4ba1c7bc0328cbe97f522aca8a708197c0d0c4f4 100644
--- a/bindings/java/test/org/sleuthkit/datamodel/timeline/TimelineTestSuite.java
+++ b/bindings/java/test/org/sleuthkit/datamodel/timeline/TimelineTestSuite.java
@@ -27,7 +27,8 @@
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
 	RootFilterTest.class,
-	EventTypeFilterTest.class})
+	EventTypeFilterTest.class
+})
 public class TimelineTestSuite {
 
 }
diff --git a/case-uco/java/nbproject/project.properties b/case-uco/java/nbproject/project.properties
index c98e3725ed14261d79cda777a763ece17786ca0a..85f76821583f5591463baa9c3d1c4343479e6fd4 100644
--- a/case-uco/java/nbproject/project.properties
+++ b/case-uco/java/nbproject/project.properties
@@ -35,14 +35,14 @@ dist.javadoc.dir=${dist.dir}/javadoc
 endorsed.classpath=
 excludes=
 file.reference.gson-2.8.5.jar=lib/gson-2.8.5.jar
-file.reference.sleuthkit-4.10.1.jar=lib/sleuthkit-4.10.1.jar
+file.reference.sleuthkit-4.10.1.jar=lib/sleuthkit-4.10.1.jar
 includes=**
 jar.archive.disabled=${jnlp.enabled}
 jar.compress=false
 jar.index=${jnlp.enabled}
 javac.classpath=\
     ${file.reference.gson-2.8.5.jar}:\
-    ${file.reference.sleuthkit-4.10.1.jar}
+${file.reference.sleuthkit-4.10.1.jar}
 # Space-separated list of extra javac options
 javac.compilerargs=-Xlint
 javac.deprecation=false
diff --git a/release/VS2015_cygwin.bat b/release/VS2015_cygwin.bat
index 10a14f577617a8c4ad3d815352402825886f7496..ea03ca4c62409974e0a001c370274fa08e9d7a1d 100755
--- a/release/VS2015_cygwin.bat
+++ b/release/VS2015_cygwin.bat
@@ -1,9 +1,10 @@
 @echo off
-REM Launch a Cygwin shell with the needed Visual Studio 2015 environment variables set
+REM Double-click this file to use it.
+REM It will launch a Cygwin shell with the needed Visual Studio 2015 environment variables set
 REM Used to run the automated build / release scripts that are written in Perl/Python
 
 CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat"
 
 C:
 chdir C:\cygwin64\bin
-bash --login -i
\ No newline at end of file
+bash --login -i
diff --git a/release/release-win.pl b/release/release-win.pl
index 79783ab91f10ec3dc87239ca135e9b53f6167e93..7bc4c6c2601bdec53c447a7c9c78a4d5ea58bf21 100755
--- a/release/release-win.pl
+++ b/release/release-win.pl
@@ -7,12 +7,12 @@
 # This only builds the 32-bit, release target  
 #
 # Assumes:
-#- libewf, libvmdk, and libvhdi are configured and built
 #- The correct msbuild is in the PATH
 #-- VS2015 and VS2008 put this in different places.  If VS2008 is found first, you'll get errors
 #   about not finding the 140_xp platform. 
 #-- The easiest way to do this is to launch Cygwin using the appropriate batch file, which sets
 #   the correct environment variables. 
+#- The Nuget command-line tool (nuget.exe) is installed and on the PATH
 #
 # This requires Cygwin with:
 # - git 
@@ -21,8 +21,7 @@
 
 use strict;
 
-my $TESTING = 0;
-print "TESTING MODE (no commits)\n" if ($TESTING);
+# Use 'no-tag' as the tag name to do basic testing
 
 unless (@ARGV == 1) {
 	print stderr "Missing arguments: version\n";
@@ -47,18 +46,6 @@
 #die "Missing redist dir $REDIST_LOC" unless (-d "$REDIST_LOC");
 
 
-# Verify LIBX libraries exist / built
-die "LIBEWF missing" unless (-d "$ENV{'LIBEWF_HOME'}");
-die "libewf dll missing" 
-	unless (-e "$ENV{'LIBEWF_HOME'}/msvscpp/Release/libewf.dll" ); 
-
-die "libvhdi dll missing" 
-	unless (-e "$ENV{'LIBVHDI_HOME'}/msvscpp/Release/libvhdi.dll" ); 
-
-die "libvhdi dll missing" 
-	unless (-e "$ENV{'LIBVMDK_HOME'}/msvscpp/Release/libvmdk.dll" ); 
-
-	
 #######################
 
 # Function to execute a command and send output to pipe
@@ -165,10 +152,14 @@ sub build_core {
 
 	die "Release folder not deleted" if (-x "Release/fls.exe");
 
+
+	# Get Dependencies
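+	# 'nuget restore' downloads the Nuget packages (libewf, libvmdk, libvhdi) referenced by the solution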
+	`nuget restore tsk-win.sln`;
+
 	# 2008 version
 	# `vcbuild /errfile:BuildErrors.txt tsk-win.sln "Release|Win32"`; 
 	# 2010/2015 version
-	`msbuild.exe tsk-win.sln /m /p:Configuration=Release /clp:ErrorsOnly /nologo > BuildErrors.txt`;
+	`msbuild.exe tsk-win.sln /m /p:Configuration=Release /p:platform=Win32 /clp:ErrorsOnly /nologo > BuildErrors.txt`;
 	die "Build errors -- check win32/BuildErrors.txt" if (-s "BuildErrors.txt");
 
 	# Do a basic check on some of the executables
@@ -240,4 +231,4 @@ sub package_core {
 
 update_code();
 build_core();
-package_core();
\ No newline at end of file
+package_core();
diff --git a/tools/fiwalk/src/arff.cpp b/tools/fiwalk/src/arff.cpp
index cfc4429f59eea2651cc14f47ec537b1ba054a3b1..1297d11ff9276820c8ed6e10011894bd768604ae 100644
--- a/tools/fiwalk/src/arff.cpp
+++ b/tools/fiwalk/src/arff.cpp
@@ -66,7 +66,7 @@ bool arff::is_weka_date(const string &s)
        s[10]==' ' &&
        isdigit(s[11]) && isdigit(s[12]) && s[13]==':' &&       
        isdigit(s[14]) && isdigit(s[15]) && s[16]==':' &&       
-       isdigit(s[17]) && isdigit(s[17]) && s.size()==19) return true;
+       isdigit(s[17]) && isdigit(s[18]) && s.size()==19) return true;
     return false;
 }
 
diff --git a/tsk/fs/ext2fs.c b/tsk/fs/ext2fs.c
index b547605622583e1ee701df20fcbab882ccc86bd2..cc42ff5f51c61976053dad83a9fb5bfdaa321d37 100755
--- a/tsk/fs/ext2fs.c
+++ b/tsk/fs/ext2fs.c
@@ -452,13 +452,14 @@ static uint8_t
  * @param ext2fs A ext2fs file system information structure
  * @param dino_inum Metadata address
  * @param dino_buf The buffer to store the block in (must be size of ext2fs->inode_size or larger)
+ * @param ea_buf Set to point at the extended attribute data inside dino_buf (NULL if the inode has no room for in-inode attributes)
+ * @param ea_buf_len Set to the length of the extended attribute data
  *
  * return 1 on error and 0 on success
  * */
 
 static uint8_t
 ext2fs_dinode_load(EXT2FS_INFO * ext2fs, TSK_INUM_T dino_inum,
-    ext2fs_inode * dino_buf)
+    ext2fs_inode * dino_buf, uint8_t ** ea_buf, size_t * ea_buf_len)
 {
     EXT2_GRPNUM_T grp_num;
     TSK_OFF_T addr;
@@ -551,6 +552,16 @@ ext2fs_dinode_load(EXT2FS_INFO * ext2fs, TSK_INUM_T dino_inum,
 //DEBUG    printf("Inode Size: %d, %d, %d, %d\n", sizeof(ext2fs_inode), *ext2fs->fs->s_inode_size, ext2fs->inode_size, *ext2fs->fs->s_want_extra_isize);
 //DEBUG    debug_print_buf((char *)dino_buf, ext2fs->inode_size);
 
+    // Check if we have an extended attribute in the inode
+    if (ext2fs->inode_size > EXT2_EA_INODE_OFFSET) {
+        // The extended attribute data immediately follows the standard inode data
+        *ea_buf = (uint8_t*)dino_buf + EXT2_EA_INODE_OFFSET;
+        *ea_buf_len = ext2fs->inode_size - EXT2_EA_INODE_OFFSET;
+    }
+    else {
+        *ea_buf = NULL;
+        *ea_buf_len = 0;
+    }
+
     if (tsk_verbose) {
         tsk_fprintf(stderr,
             "%" PRIuINUM " m/l/s=%o/%d/%" PRIu32
@@ -575,21 +586,132 @@ ext2fs_dinode_load(EXT2FS_INFO * ext2fs, TSK_INUM_T dino_inum,
     return 0;
 }
 
+/**
+* \internal
+* Loads attribute for Ext4 inline storage method.
+* @param fs_file File to load attrs
+* @param ea_buf  Extended attribute buffer
+* @param ea_buf_len Extended attribute buffer length
+* @returns 0 on success, 1 otherwise
+*/
+static uint8_t
+ext4_load_attrs_inline(TSK_FS_FILE *fs_file, const uint8_t * ea_buf, size_t ea_buf_len)
+{
+    TSK_FS_META *fs_meta = fs_file->meta;
+    TSK_FS_ATTR *fs_attr;
+
+    // see if we have already loaded the attr
+    if ((fs_meta->attr != NULL)
+        && (fs_meta->attr_state == TSK_FS_META_ATTR_STUDIED)) {
+        return 0;
+    }
+
+    if (fs_meta->attr_state == TSK_FS_META_ATTR_ERROR) {
+        return 1;
+    }
+
+    // First load the data from the extended attr (if present)
+    const char *ea_inline_data = NULL;
+    uint32_t ea_inline_data_len = 0;
+    if ((ea_buf != NULL) && (ea_buf_len > 4 + sizeof(ext2fs_ea_entry))
+        && (tsk_getu32(fs_file->fs_info->endian, ea_buf) == EXT2_EA_MAGIC)) {
+
+        // First entry starts after the four byte header
+        size_t index = 4;
+        ext2fs_ea_entry *ea_entry = (ext2fs_ea_entry*) &(ea_buf[index]);
+
+        // The end of the list of entries is marked by two null bytes
+        while ((ea_entry->nlen != 0) || (ea_entry->nidx != 0)) {
+
+            // It looks like the continuation of inline data is stored in system.data.
+            // Make sure we have room to read the attr name 'data'.
+            if ((ea_entry->nidx == EXT2_EA_IDX_SYSTEM)
+                && (ea_entry->nlen == 4)
+                && (index + sizeof(ext2fs_ea_entry) + strlen("data") < ea_buf_len)
+                && (strncmp(&(ea_entry->name), "data", 4)) == 0) {
+
+                // This is the right attribute. Check that the length and offset are valid.
+                // The offset is from the beginning of the entries, i.e., four bytes into the buffer.
+                uint32_t offset = tsk_getu32(fs_file->fs_info->endian, ea_entry->val_off);
+                uint32_t size = tsk_getu32(fs_file->fs_info->endian, ea_entry->val_size);
+                if (4 + offset + size <= ea_buf_len) {
+                    ea_inline_data = &(ea_buf[4 + offset]);
+                    ea_inline_data_len = size;
+                    break;
+                }
+            }
+
+            // Prepare to load the next entry.
+            // The entry size is the size of the struct plus the length of the name, minus one
+            // because the struct contains the first character of the name.
+            index += sizeof(ext2fs_ea_entry) + ea_entry->nlen - 1;
+
+            // Make sure there's room for the next entry plus the 'data' name we're looking for.
+            if (index + sizeof(ext2fs_ea_entry) + strlen("data") > ea_buf_len) {
+                break;
+            }
+            ea_entry = (ext2fs_ea_entry*) &(ea_buf[index]);
+        }
+    }
+
+    // Combine the two parts of the inline data for the resident attribute. For now, make a
+    // buffer for the full file size - this may be different than the length of the data 
+    // from the inode if we have sparse data.
+    uint8_t *resident_data;
+    if ((resident_data = (uint8_t*)tsk_malloc(fs_meta->size)) == NULL) {
+        return 1;
+    }
+    memset(resident_data, 0, fs_meta->size);
+
+    // Copy the data from the inode.
+    size_t inode_data_len = (fs_meta->size < EXT2_INLINE_MAX_DATA_LEN) ? fs_meta->size : EXT2_INLINE_MAX_DATA_LEN;
+    memcpy(resident_data, fs_meta->content_ptr, inode_data_len);
+
+    // If we need more data and found an extended attribute, append that data
+    if ((fs_meta->size > EXT2_INLINE_MAX_DATA_LEN) && (ea_inline_data_len > 0)) {
+        // Don't go beyond the size of the file
+        size_t ea_data_len = (ea_inline_data_len < (uint64_t)fs_meta->size - inode_data_len) ? ea_inline_data_len : fs_meta->size - inode_data_len;
+        memcpy(resident_data + inode_data_len, ea_inline_data, ea_data_len);
+    }
+
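+    // Store the assembled inline data as a resident DATA attribute of the file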
+    fs_meta->attr = tsk_fs_attrlist_alloc();
+    if ((fs_attr =
+        tsk_fs_attrlist_getnew(fs_meta->attr,
+            TSK_FS_ATTR_RES)) == NULL) {
+        free(resident_data);
+        return 1;
+    }
+
+    // Set the details in the fs_attr structure
+    if (tsk_fs_attr_set_str(fs_file, fs_attr, "DATA",
+        TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT,
+        (void*)resident_data,
+        fs_meta->size)) {
+        free(resident_data);
+        fs_meta->attr_state = TSK_FS_META_ATTR_ERROR;
+        return 1;
+    }
+
+    free(resident_data);
+    fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED;
+    return 0;
+}
+
 /* ext2fs_dinode_copy - copy cached disk inode into generic inode
  *
  * returns 1 on error and 0 on success
  * */
 static uint8_t
-ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta,
-    TSK_INUM_T inum, const ext2fs_inode * dino_buf)
+ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_FILE * fs_file,
+    TSK_INUM_T inum, const ext2fs_inode * dino_buf, const uint8_t * ea_buf, size_t ea_buf_len)
 {
     int i;
     TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info;
+    TSK_FS_META * fs_meta = fs_file->meta;
     ext2fs_sb *sb = ext2fs->fs;
     EXT2_GRPNUM_T grp_num;
     TSK_INUM_T ibase = 0;
 
-
     if (dino_buf == NULL) {
         tsk_error_reset();
         tsk_error_set_errno(TSK_ERR_FS_ARG);
@@ -728,9 +850,20 @@ ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta,
         /* NOTE TSK_DADDR_T != uint32_t, so lets make sure we use uint32_t */
         addr_ptr = (uint32_t *) fs_meta->content_ptr;
         for (i = 0; i < EXT2FS_NDADDR + EXT2FS_NIADDR; i++) {
-            addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]);;
+            addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]);
         }
     }
+    else if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_INLINE_DATA) {
+        uint32_t *addr_ptr;
+        fs_meta->content_type = TSK_FS_META_CONTENT_TYPE_EXT4_INLINE;
+        addr_ptr = (uint32_t *)fs_meta->content_ptr;
+        for (i = 0; i < EXT2FS_NDADDR + EXT2FS_NIADDR; i++) {
+            addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]);
+        }
+
+        // For inline data we create the default attribute now
+        ext4_load_attrs_inline(fs_file, ea_buf, ea_buf_len);
+    } 
     else {
         TSK_DADDR_T *addr_ptr;
         addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr;
@@ -887,6 +1020,8 @@ ext2fs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file,
 {
     EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs;
     ext2fs_inode *dino_buf = NULL;
+    uint8_t *ea_buf = NULL;
+    size_t ea_buf_len = 0;
     unsigned int size = 0;
 
     if (a_fs_file == NULL) {
@@ -919,12 +1054,12 @@ ext2fs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file,
         return 1;
     }
 
-    if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) {
+    if (ext2fs_dinode_load(ext2fs, inum, dino_buf, &ea_buf, &ea_buf_len)) {
         free(dino_buf);
         return 1;
     }
 
-    if (ext2fs_dinode_copy(ext2fs, a_fs_file->meta, inum, dino_buf)) {
+    if (ext2fs_dinode_copy(ext2fs, a_fs_file, inum, dino_buf, ea_buf, ea_buf_len)) {
         free(dino_buf);
         return 1;
     }
@@ -956,6 +1091,8 @@ ext2fs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum,
     TSK_FS_FILE *fs_file;
     unsigned int myflags;
     ext2fs_inode *dino_buf = NULL;
+    uint8_t *ea_buf = NULL;
+    size_t ea_buf_len = 0;
     unsigned int size = 0;
 
     // clean up any error messages that are lying around
@@ -1089,7 +1226,7 @@ ext2fs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum,
         if ((flags & myflags) != myflags)
             continue;
 
-        if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) {
+        if (ext2fs_dinode_load(ext2fs, inum, dino_buf, &ea_buf, &ea_buf_len)) {
             tsk_fs_file_close(fs_file);
             free(dino_buf);
             return 1;
@@ -1119,7 +1256,7 @@ ext2fs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum,
          * Fill in a file system-independent inode structure and pass control
          * to the application.
          */
-        if (ext2fs_dinode_copy(ext2fs, fs_file->meta, inum, dino_buf)) {
+        if (ext2fs_dinode_copy(ext2fs, fs_file, inum, dino_buf, ea_buf, ea_buf_len)) {
             tsk_fs_meta_close(fs_file->meta);
             free(dino_buf);
             return 1;
@@ -1412,14 +1549,26 @@ ext2fs_make_data_run_extent(TSK_FS_INFO * fs_info, TSK_FS_ATTR * fs_attr,
     }
 
     data_run->offset = tsk_getu32(fs_info->endian, extent->ee_block);
-    data_run->addr =
-        (((uint32_t) tsk_getu16(fs_info->endian,
+
+    // Check if the extent is initialized or uninitialized
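+    // In ext4, an ee_len value greater than EXT2_MAX_INIT_EXTENT_LENGTH (0x8000) marks an uninitialized (preallocated) extent; its real length is ee_len minus 0x8000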
+    if (tsk_getu16(fs_info->endian, extent->ee_len) <= EXT2_MAX_INIT_EXTENT_LENGTH) {
+        // Extent is initialized - process normally
+        data_run->addr =
+            (((uint32_t)tsk_getu16(fs_info->endian,
                 extent->ee_start_hi)) << 16) | tsk_getu32(fs_info->endian,
-        extent->ee_start_lo);
-    data_run->len = tsk_getu16(fs_info->endian, extent->ee_len);
+                    extent->ee_start_lo);
+        data_run->len = tsk_getu16(fs_info->endian, extent->ee_len);
+    }
+    else {
+        // Extent is uninitialized - make a sparse run
+        data_run->len = tsk_getu16(fs_info->endian, extent->ee_len) - EXT2_MAX_INIT_EXTENT_LENGTH;
+        data_run->addr = 0;
+        data_run->flags = TSK_FS_ATTR_RUN_FLAG_SPARSE;
+    }
 
     // save the run
     if (tsk_fs_attr_add_run(fs_info, fs_attr, data_run)) {
+        tsk_fs_attr_run_free(data_run);
         return 1;
     }
 
@@ -1585,6 +1734,50 @@ ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info,
     return count;
 }
 
+/** \internal
+* If the file length is longer than what is in the attr runs, add a sparse
+* data run to cover the rest of the file.
+*
+* @return 0 if successful or 1 on error.
+*/
+static uint8_t
+ext2fs_handle_implicit_sparse_data_run(TSK_FS_INFO * fs_info, TSK_FS_ATTR * fs_attr) {
+    TSK_FS_FILE *fs_file = fs_attr->fs_file;
+
+    if (fs_file == NULL) {
+        return 1;
+    }
+
+    TSK_DADDR_T end_of_runs;
+    TSK_DADDR_T total_file_blocks = roundup(fs_file->meta->size, fs_info->block_size) / fs_info->block_size;
+
+    if (fs_attr->nrd.run_end) {
+        end_of_runs = fs_attr->nrd.run_end->offset + fs_attr->nrd.run_end->len;
+    }
+    else {
+        end_of_runs = 0;
+    }
+
+    if (end_of_runs < total_file_blocks) {
+        // Make sparse run.
+        TSK_FS_ATTR_RUN *data_run;
+        data_run = tsk_fs_attr_run_alloc();
+        if (data_run == NULL) {
+            return 1;
+        }
+        data_run->offset = end_of_runs;
+        data_run->addr = 0;
+        data_run->len = total_file_blocks - end_of_runs;
+        data_run->flags = TSK_FS_ATTR_RUN_FLAG_SPARSE;
+
+        // Save the run.
+        if (tsk_fs_attr_add_run(fs_info, fs_attr, data_run)) {
+            return 1;
+        }
+    }
+    return 0;
+}
 
 /**
  * \internal
@@ -1638,7 +1831,7 @@ ext4_load_attrs_extents(TSK_FS_FILE *fs_file)
     }
     
     length = roundup(fs_meta->size, fs_info->block_size);
-    
+
     if ((fs_attr =
          tsk_fs_attrlist_getnew(fs_meta->attr,
                                 TSK_FS_ATTR_NONRES)) == NULL) {
@@ -1652,6 +1845,17 @@ ext4_load_attrs_extents(TSK_FS_FILE *fs_file)
     }
     
     if (num_entries == 0) {
+        if (fs_meta->size == 0) {
+            // Empty file
+            fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED;
+            return 0;
+        }
+
+        // The entire file is sparse
+        if (ext2fs_handle_implicit_sparse_data_run(fs_info, fs_attr)) {
+            return 1;
+        }
+
         fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED;
         return 0;
     }
@@ -1688,7 +1892,7 @@ ext4_load_attrs_extents(TSK_FS_FILE *fs_file)
             ("ext2fs_load_attr: Inode reports too many extent indices");
             return 1;
         }
-        
+
         if ((fs_attr_extent =
              tsk_fs_attrlist_getnew(fs_meta->attr,
                                     TSK_FS_ATTR_NONRES)) == NULL) {
@@ -1723,6 +1927,11 @@ ext4_load_attrs_extents(TSK_FS_FILE *fs_file)
             }
         }
     }
+
+    // There may be implicit sparse blocks at the end of the file
+    if (ext2fs_handle_implicit_sparse_data_run(fs_info, fs_attr)) {
+        return 1;
+    }
     
     fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED;
     
@@ -1743,6 +1952,10 @@ ext2fs_load_attrs(TSK_FS_FILE * fs_file)
     if (fs_file->meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS) {
         return ext4_load_attrs_extents(fs_file);
     }
+    else if (fs_file->meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_INLINE) {
+        // Inline attributes are loaded in dinode_copy
+        return 0;
+    }
     else {
         return tsk_fs_unix_make_data_run(fs_file);
     }
@@ -2611,6 +2824,8 @@ ext2fs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile,
     EXT2FS_PRINT_ADDR print;
     const TSK_FS_ATTR *fs_attr_indir;
     ext2fs_inode *dino_buf = NULL;
+    uint8_t *ea_buf = NULL;
+    size_t ea_buf_len = 0;
     char timeBuf[128];
     unsigned int size;
     unsigned int large_inodes;
@@ -2631,7 +2846,7 @@ ext2fs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile,
         return 1;
     }
 
-    if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) {
+    if (ext2fs_dinode_load(ext2fs, inum, dino_buf, &ea_buf, &ea_buf_len)) {
         free(dino_buf);
         return 1;
     }
@@ -2738,6 +2953,21 @@ ext2fs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile,
         if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EOFBLOCKS)
             tsk_fprintf(hFile, "Blocks Allocated Beyond EOF, ");
 
+        if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_SNAPFILE)
+            tsk_fprintf(hFile, "Snapshot, ");
+
+        if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_SNAPFILE_DELETED)
+            tsk_fprintf(hFile, "Deleted Snapshot, ");
+
+        if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_SNAPFILE_SHRUNK)
+            tsk_fprintf(hFile, "Shrunk Snapshot, ");
+
+        if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_INLINE_DATA)
+            tsk_fprintf(hFile, "Inline Data, ");
+
+        if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_PROJINHERIT)
+            tsk_fprintf(hFile, "Inherited project ID, ");
+
 
         tsk_fprintf(hFile, "\n");
     }
@@ -3063,87 +3293,89 @@ ext2fs_istat(TSK_FS_INFO * fs, TSK_FS_ISTAT_FLAG_ENUM istat_flags, FILE * hFile,
     if (numblock > 0)
         fs_meta->size = numblock * fs->block_size;
 
-    tsk_fprintf(hFile, "\nDirect Blocks:\n");
+    if (fs_meta->content_type != TSK_FS_META_CONTENT_TYPE_EXT4_INLINE) {
+        tsk_fprintf(hFile, "\nDirect Blocks:\n");
 
-    if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
-        const TSK_FS_ATTR *fs_attr_default =
-            tsk_fs_file_attr_get_type(fs_file,
-                TSK_FS_ATTR_TYPE_DEFAULT, 0, 0);
-        if (fs_attr_default && (fs_attr_default->flags & TSK_FS_ATTR_NONRES)) {
-            if (tsk_fs_attr_print(fs_attr_default, hFile)) {
-                tsk_fprintf(hFile, "\nError creating run lists\n");
-                tsk_error_print(hFile);
-                tsk_error_reset();
-            }
-        }
-    }
-    else {
-        print.idx = 0;
-        print.hFile = hFile;
-
-        if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_AONLY,
-            print_addr_act, (void *)&print)) {
-            tsk_fprintf(hFile, "\nError reading file:  ");
-            tsk_error_print(hFile);
-            tsk_error_reset();
-        }
-        else if (print.idx != 0) {
-            tsk_fprintf(hFile, "\n");
-        }
-    }
-
-    if (fs_meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS) {
-        const TSK_FS_ATTR *fs_attr_extent =
-            tsk_fs_file_attr_get_type(fs_file,
-            TSK_FS_ATTR_TYPE_UNIX_EXTENT, 0, 0);
-        if (fs_attr_extent) {
-            tsk_fprintf(hFile, "\nExtent Blocks:\n");
-
-            if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
-                if (tsk_fs_attr_print(fs_attr_extent, hFile)) {
+        if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
+            const TSK_FS_ATTR *fs_attr_default =
+                tsk_fs_file_attr_get_type(fs_file,
+                    TSK_FS_ATTR_TYPE_DEFAULT, 0, 0);
+            if (fs_attr_default && (fs_attr_default->flags & TSK_FS_ATTR_NONRES)) {
+                if (tsk_fs_attr_print(fs_attr_default, hFile)) {
                     tsk_fprintf(hFile, "\nError creating run lists\n");
                     tsk_error_print(hFile);
                     tsk_error_reset();
                 }
             }
-            else {
-                print.idx = 0;
+        }
+        else {
+            print.idx = 0;
+            print.hFile = hFile;
 
-                if (tsk_fs_attr_walk(fs_attr_extent,
-                    TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act,
-                    (void *)&print)) {
-                    tsk_fprintf(hFile,
-                        "\nError reading indirect attribute:  ");
-                    tsk_error_print(hFile);
-                    tsk_error_reset();
+            if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_AONLY,
+                print_addr_act, (void *)&print)) {
+                tsk_fprintf(hFile, "\nError reading file:  ");
+                tsk_error_print(hFile);
+                tsk_error_reset();
+            }
+            else if (print.idx != 0) {
+                tsk_fprintf(hFile, "\n");
+            }
+        }
+
+        if (fs_meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS) {
+            const TSK_FS_ATTR *fs_attr_extent =
+                tsk_fs_file_attr_get_type(fs_file,
+                    TSK_FS_ATTR_TYPE_UNIX_EXTENT, 0, 0);
+            if (fs_attr_extent) {
+                tsk_fprintf(hFile, "\nExtent Blocks:\n");
+
+                if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
+                    if (tsk_fs_attr_print(fs_attr_extent, hFile)) {
+                        tsk_fprintf(hFile, "\nError creating run lists\n");
+                        tsk_error_print(hFile);
+                        tsk_error_reset();
+                    }
                 }
-                else if (print.idx != 0) {
-                    tsk_fprintf(hFile, "\n");
+                else {
+                    print.idx = 0;
+
+                    if (tsk_fs_attr_walk(fs_attr_extent,
+                        TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act,
+                        (void *)&print)) {
+                        tsk_fprintf(hFile,
+                            "\nError reading indirect attribute:  ");
+                        tsk_error_print(hFile);
+                        tsk_error_reset();
+                    }
+                    else if (print.idx != 0) {
+                        tsk_fprintf(hFile, "\n");
+                    }
                 }
             }
         }
-    }
-    else {
-        fs_attr_indir = tsk_fs_file_attr_get_type(fs_file,
-            TSK_FS_ATTR_TYPE_UNIX_INDIR, 0, 0);
-        if (fs_attr_indir) {
-            tsk_fprintf(hFile, "\nIndirect Blocks:\n");
-            if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
-                tsk_fs_attr_print(fs_attr_indir, hFile);
-            }
-            else {
-                print.idx = 0;
-
-                if (tsk_fs_attr_walk(fs_attr_indir,
-                    TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act,
-                    (void *)&print)) {
-                    tsk_fprintf(hFile,
-                        "\nError reading indirect attribute:  ");
-                    tsk_error_print(hFile);
-                    tsk_error_reset();
+        else {
+            fs_attr_indir = tsk_fs_file_attr_get_type(fs_file,
+                TSK_FS_ATTR_TYPE_UNIX_INDIR, 0, 0);
+            if (fs_attr_indir) {
+                tsk_fprintf(hFile, "\nIndirect Blocks:\n");
+                if (istat_flags & TSK_FS_ISTAT_RUNLIST) {
+                    tsk_fs_attr_print(fs_attr_indir, hFile);
                 }
-                else if (print.idx != 0) {
-                    tsk_fprintf(hFile, "\n");
+                else {
+                    print.idx = 0;
+
+                    if (tsk_fs_attr_walk(fs_attr_indir,
+                        TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act,
+                        (void *)&print)) {
+                        tsk_fprintf(hFile,
+                            "\nError reading indirect attribute:  ");
+                        tsk_error_print(hFile);
+                        tsk_error_reset();
+                    }
+                    else if (print.idx != 0) {
+                        tsk_fprintf(hFile, "\n");
+                    }
                 }
             }
         }
diff --git a/tsk/fs/ext2fs_dent.c b/tsk/fs/ext2fs_dent.c
index f590bd07bf88997626096fac0631f204bf4830cb..95e8b256961bdd504dcc91b34a2d4abeb2140e30 100644
--- a/tsk/fs/ext2fs_dent.c
+++ b/tsk/fs/ext2fs_dent.c
@@ -317,7 +317,13 @@ ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir,
         return TSK_ERR;
     }
 
-    size = roundup(fs_dir->fs_file->meta->size, a_fs->block_size);
+    if (fs_dir->fs_file->meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_INLINE) {
+        // For inline dirs, don't try to read past the end of the data
+        size = fs_dir->fs_file->meta->size;
+    }
+    else {
+        size = roundup(fs_dir->fs_file->meta->size, a_fs->block_size);
+    }
     TSK_OFF_T offset = 0;
 
     while (size > 0) {
diff --git a/tsk/fs/fs_io.c b/tsk/fs/fs_io.c
index ec1a15326839f8616e0f0cc409f35286dcb4ca29..acfb99b3fc37b018fdea13510fde5c45b1d31e81 100755
--- a/tsk/fs/fs_io.c
+++ b/tsk/fs/fs_io.c
@@ -248,7 +248,8 @@ tsk_fs_read_block_decrypt(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr, char *a_buf,
     if ((a_fs->flags & TSK_FS_INFO_FLAG_ENCRYPTED)
         && ret_len > 0
         && a_fs->decrypt_block) {
-        for (TSK_DADDR_T i = 0; i < a_len / a_fs->block_size; i++) {
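+        /* Loop counter declared outside the for statement, for compilers that lack C99-style declarations inside a for loop */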
+        TSK_DADDR_T i;
+        for (i = 0; i < a_len / a_fs->block_size; i++) {
             a_fs->decrypt_block(a_fs, crypto_id + i,
                 a_buf + (a_fs->block_size * i));
         }
diff --git a/tsk/fs/tsk_ext2fs.h b/tsk/fs/tsk_ext2fs.h
index b937a8e43c0a6d9927c83a9cfc308e3b8d0df851..71640f5fd67ffbb68815568886a7bd8263e17adc 100644
--- a/tsk/fs/tsk_ext2fs.h
+++ b/tsk/fs/tsk_ext2fs.h
@@ -365,7 +365,9 @@ extern "C" {
         uint8_t eh_generation[4];       /* u32 */
     } ext2fs_extent_header;
 
-/* MODE */
+#define EXT2_MAX_INIT_EXTENT_LENGTH 0x8000  /* Maximum length of an initialized extent */
+
+/* MODE - Note that values are in octal format */
 #define EXT2_IN_FMT  0170000
 #define EXT2_IN_SOCK 0140000
 #define EXT2_IN_LNK  0120000
@@ -411,10 +413,16 @@ extern "C" {
 #define EXT2_IN_EXTENTS                 0x00080000      /* Inode uses extents */
 #define EXT2_IN_EA_INODE                0x00200000      /* Inode used for large EA */
 #define EXT2_IN_EOFBLOCKS               0x00400000      /* Blocks allocated beyond EOF */
+#define EXT2_SNAPFILE                   0x01000000      /* Inode is a snapshot */
+#define EXT2_SNAPFILE_DELETED           0x04000000      /* Snapshot is being deleted */
+#define EXT2_SNAPFILE_SHRUNK            0x08000000      /* Snapshot shrink has completed */
+#define EXT2_INLINE_DATA                0x10000000      /* Inode has inline data */
+#define EXT2_PROJINHERIT                0x20000000      /* Create children with the same project ID */
 #define EXT2_IN_RESERVED                0x80000000      /* reserved for ext4 lib */
 #define EXT2_IN_USER_VISIBLE            0x004BDFFF      /* User visible flags */
 #define EXT2_IN_USER_MODIFIABLE         0x004B80FF      /* User modifiable flags */
 
+#define EXT2_INLINE_MAX_DATA_LEN 60  /* Max length for inline data in inode (not counting extended attribute) */
 
 /*
  * directory entries
@@ -461,6 +469,7 @@ extern "C" {
  */
 
 #define EXT2_EA_MAGIC	0xEA020000
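+/* Offset in the inode where in-inode extended attribute data begins (128-byte base inode plus the typical 32 bytes of extra fields) */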
+#define EXT2_EA_INODE_OFFSET   160
 
     typedef struct {
         uint8_t magic[4];
@@ -478,6 +487,8 @@ extern "C" {
 #define EXT2_EA_IDX_TRUSTED                4
 #define EXT2_EA_IDX_LUSTRE                 5
 #define EXT2_EA_IDX_SECURITY               6
+#define EXT2_EA_IDX_SYSTEM                 7    /* Possibly only valid for inline data */
+#define EXT2_EA_IDX_SYSTEM_RICHACL         8
 
 /* Entries follow the header and are aligned to 4-byte boundaries
  * the value of the attribute is stored at the bottom of the block
diff --git a/tsk/fs/tsk_fs.h b/tsk/fs/tsk_fs.h
index 2158584563104b5158975835cc73a11ccfdf243b..d0b5d1dc59697222a59fc889a8e3de8248a17e45 100644
--- a/tsk/fs/tsk_fs.h
+++ b/tsk/fs/tsk_fs.h
@@ -437,7 +437,8 @@ extern "C" {
 
     typedef enum TSK_FS_META_CONTENT_TYPE_ENUM {
         TSK_FS_META_CONTENT_TYPE_DEFAULT = 0x0,
-        TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS = 0x1     ///< Ext4 with extents instead of individual pointers
+        TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS = 0x1,     ///< Ext4 with extents instead of individual pointers
+        TSK_FS_META_CONTENT_TYPE_EXT4_INLINE = 0x02      ///< Ext4 with inline data
     } TSK_FS_META_CONTENT_TYPE_ENUM;
 
 
diff --git a/win32/updateAndBuildAll.py b/win32/updateAndBuildAll.py
index 3cd257e5a2fce9e1c0fe22e88c5c6f8b5696a4d4..3114e1b699e01604b5ddd668e4087fd138c8873b 100644
--- a/win32/updateAndBuildAll.py
+++ b/win32/updateAndBuildAll.py
@@ -2,8 +2,7 @@
 #
 # This software is distributed under the Common Public License 1.0
 #
-# Updates the TSK dependency repos (libewf, etc.), compiles them, and
-# compiles various TSK platforms using the current branch
+# Gets TSK dependencies from Nuget and compiles the current branch
 
 import codecs
 import datetime
@@ -26,7 +25,7 @@
 MINIMAL = False
 
 
-def pullAndBuildAllDependencies(depBranch):
+def getDependencies(depBranch):
     '''
-        Compile libewf, libvhdi, libvmdk.
+        Get the libewf, libvhdi, and libvmdk dependencies.
         Args:
@@ -67,121 +66,6 @@ def buildTSKAll():
     if(passed):
         buildTSK(64, "Release")
 
-def checkPathExist(path):
-    global passed
-
-    if not os.path.exists(path):
-        print (path + " not exist.")
-        sys.stdout.flush()
-        passed = False
-
-def gitPull(libHome, repo, branch):
-    '''
-        Pull the latest code.
-        Args:
-            libHome: according the environment variable to get the location
-            repo String of repository ("libewf_64bit", "libvmdk_64bit" or "libvhdi_64bit" which one to pull
-            branch: String, which branch to pull
-    '''
-
-    global SYS
-    global passed
-
-    gppth = os.path.join(LOG_PATH, "GitPullOutput" + repo + ".txt")
-    gpout = open(gppth, 'a')
-
-
-    print("Resetting " + repo)
-    sys.stdout.flush()
-    call = ["git", "reset", "--hard"]
-    ret = subprocess.call(call, stdout=sys.stdout, cwd=libHome)
-
-    if ret != 0:
-        passed = False
-        return
-
-    print("Checking out " + branch)
-    sys.stdout.flush()
-    call = ["git", "checkout", branch]
-    ret = subprocess.call(call, stdout=sys.stdout, cwd=libHome)
-
-    if ret != 0:
-        passed = False
-        return
-
-    call = ["git", "pull"]
-    print("Pulling " + repo + "/" + branch)
-    sys.stdout.flush()
-    ret = subprocess.call(call, stdout=sys.stdout, cwd=libHome)
-
-    if ret != 0:
-        passed = False
-
-    gpout.close()
-    if passed:
-        print("Update " + repo + " successfully.")
-    else:
-        print("Update " + repo + " failed.")
-
-def buildDependentLibs(libHome, wPlatform, targetDll, project):
-    '''
-        build libewf.dll, libvhdi.dll and libvmdk.dll
-    '''
-    global passed
-    passed = True
-
-    print("Building " + str(wPlatform) + "-bit " + targetDll)
-    sys.stdout.flush()
-
-    target = "Release"
-
-    if wPlatform == 64:
-        dllFile = os.path.join(libHome, "msvscpp", "x64", target, targetDll +".dll")
-    elif wPlatform == 32:
-        dllFile = os.path.join(libHome,"msvscpp",target,targetDll + ".dll")
-    else:
-        print("Invalid platform")
-        sys.stdout.flush()
-        passed = False
-        return
-
-    if (os.path.isfile(dllFile)):
-        os.remove(dllFile)
-    os.chdir(os.path.join(libHome,"msvscpp"))
-
-    vs = []
-    vs.append(MSBUILD_PATH)
-    vs.append(os.path.join(targetDll + ".sln"))
-    vs.append("/t:" + project)
-    vs.append("/p:configuration=" + target)
-    if wPlatform == 64:
-        vs.append("/p:platform=x64")
-    elif wPlatform == 32:
-        vs.append("/p:platform=Win32")
-    vs.append("/clp:ErrorsOnly")
-    vs.append("/m")
-
-    outputFile = os.path.join(LOG_PATH, targetDll + "Output.txt")
-    VSout = open(outputFile, 'w')
-    ret = subprocess.call(vs, stdout=sys.stdout)
-    errorCode = ret
-    VSout.close()
-    if ret > 0:
-        failed_proj = os.system("grep 'Done Building Project' " + outputFile + " | grep vcxproj |grep FAILED |wc -l |cut -f1 -d' '")
-        failed_pyewf = os.system("grep 'Done Building Project' " + outputFile + " | grep pyewf |grep FAILED |grep pywc -l |cut -f1 -d' '")
-        if failed_proj == failed_pyewf:
-            errorCode = 0
-    if errorCode != 0 or not os.path.exists(dllFile) or os.path.getctime(dllFile) < (time.time() - 2 * 60): # the new dll should not be 2 mins old
-        print(targetDll + " " + str(wPlatform) + "-bit C++ failed to build.\n")
-        print("return code: " + str(ret) + "\tdll file: " + dllFile + "\tcreated time: " + str(os.path.getctime(dllFile)))
-        sys.stdout.flush()
-        passed = False
-        os.chdir(CURRENT_PATH)
-        return
-    else:
-        print("Build " + str(wPlatform) + "-bit " + targetDll + " successfully")
-
-    os.chdir(CURRENT_PATH)
 
 def buildTSK(wPlatform, target):
     '''
@@ -265,7 +149,7 @@ def main():
         print("MS_BUILD Does not exist")
         sys.stdout.flush()
 
-    pullAndBuildAllDependencies(depBranch)
+    getDependencies(depBranch)
     buildTSKAll()
 
 class OS: