diff --git a/bindings/java/doxygen/Doxyfile b/bindings/java/doxygen/Doxyfile
index 1dd8a6ec0696cab85b6778aee7945167fab39dad..9db930b74764779fc3af49bc6f52b37d68e8dd41 100644
--- a/bindings/java/doxygen/Doxyfile
+++ b/bindings/java/doxygen/Doxyfile
@@ -38,7 +38,7 @@ PROJECT_NAME           = "Sleuth Kit Java Bindings (JNI)"
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 4.0
+PROJECT_NUMBER         = 4.3
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -760,7 +760,7 @@ WARN_LOGFILE           =
 
 INPUT                  = main.dox \
                          query_database.dox \
-						 blackboard.dox \
+			 blackboard.dox \
                          insert_and_update_database.dox \
 						 ../src
 
diff --git a/bindings/java/doxygen/main.dox b/bindings/java/doxygen/main.dox
index 18603b4b19e21f0fe60b7e3f706f83c537c4471d..545b1109cadbe5bb38575362af2528c54d7fabb8 100644
--- a/bindings/java/doxygen/main.dox
+++ b/bindings/java/doxygen/main.dox
@@ -34,7 +34,7 @@ Flush out here on general layout.
 
 \section jni_blackboard The Blackboard
 
-\ref mod_bbpage
+\subpage mod_bbpage
 
 
 \section the_database The Database
diff --git a/tsk/fs/hfs.c b/tsk/fs/hfs.c
index 3463908fbdb9aa8744c2c7271be749b26044cbbd..c875992131908cc72c535540ba8d90e0d79c1b3a 100644
--- a/tsk/fs/hfs.c
+++ b/tsk/fs/hfs.c
@@ -817,14 +817,17 @@ hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1,
 
 
 /** \internal
+ *
+ * Traverses the HFS catalog file, calling the given callback for
+ * each record.
+ *
  * @param hfs File system
- * @param targ_data can be null
  * @param a_cb callback 
  * @param ptr Pointer to pass to callback
  * @returns 1 on error
  */
 uint8_t
-hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
+hfs_cat_traverse(HFS_INFO * hfs,
     TSK_HFS_BTREE_CB a_cb, void *ptr)
 {
     TSK_FS_INFO *fs = &(hfs->fs_info);
@@ -921,6 +924,7 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                 size_t rec_off;
                 hfs_btree_key_cat *key;
                 uint8_t retval;
+                uint16_t keylen;
 
                 // get the record offset in the node
                 rec_off =
@@ -935,8 +939,20 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                     free(node);
                     return 1;
                 }
+
                 key = (hfs_btree_key_cat *) & node[rec_off];
 
+                keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
+                if (keylen > nodesize) {
+                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
+                    tsk_error_set_errstr
+                        ("hfs_cat_traverse: length of key %d in index node %d too large (%d vs %"
+                        PRIu16 ")", rec, cur_node, keylen, nodesize);
+                    free(node);
+                    return 1;
+                }
+
+
                 /*
                    if (tsk_verbose)
                    tsk_fprintf(stderr,
@@ -946,9 +962,10 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                    tsk_getu32(fs->endian, key->parent_cnid));
                  */
 
+
                 /* save the info from this record unless it is too big */
                 retval =
-                    a_cb(hfs, HFS_BT_NODE_TYPE_IDX, targ_data, key,
+                    a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key,
                     cur_off + rec_off, ptr);
                 if (retval == HFS_BTREE_CB_ERR) {
                     tsk_error_set_errno(TSK_ERR_FS_GENFS);
@@ -1012,6 +1029,7 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                 size_t rec_off;
                 hfs_btree_key_cat *key;
                 uint8_t retval;
+                uint16_t keylen;
 
                 // get the record offset in the node
                 rec_off =
@@ -1028,6 +1046,16 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                 }
                 key = (hfs_btree_key_cat *) & node[rec_off];
 
+                keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len);
+                if (keylen > nodesize) {
+                    tsk_error_set_errno(TSK_ERR_FS_GENFS);
+                    tsk_error_set_errstr
+                        ("hfs_cat_traverse: length of key %d in leaf node %d too large (%d vs %"
+                        PRIu16 ")", rec, cur_node, keylen, nodesize);
+                    free(node);
+                    return 1;
+                }
+
                 /*
                    if (tsk_verbose)
                    tsk_fprintf(stderr,
@@ -1039,7 +1067,7 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
                 //                rec_cnid = tsk_getu32(fs->endian, key->file_id);
 
                 retval =
-                    a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, targ_data, key,
+                    a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key,
                     cur_off + rec_off, ptr);
                 if (retval == HFS_BTREE_CB_LEAF_STOP) {
                     is_done = 1;
@@ -1078,13 +1106,19 @@ hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
     return 0;
 }
 
+typedef struct {
+    const hfs_btree_key_cat *targ_key;
+    TSK_OFF_T off;
+} HFS_CAT_GET_RECORD_OFFSET_DATA;
 
 static uint8_t
 hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
-    const void *targ_data, const hfs_btree_key_cat * cur_key,
+    const hfs_btree_key_cat * cur_key,
     TSK_OFF_T key_off, void *ptr)
 {
-    const hfs_btree_key_cat *targ_key = (hfs_btree_key_cat *) targ_data;
+    HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr;
+    const hfs_btree_key_cat *targ_key = offset_data->targ_key;
+
     if (tsk_verbose)
         tsk_fprintf(stderr,
             "hfs_cat_get_record_offset_cb: %s node want: %" PRIu32
@@ -1108,8 +1142,7 @@ hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
             return HFS_BTREE_CB_LEAF_GO;
         }
         else if (diff == 0) {
-            TSK_OFF_T *off = (TSK_OFF_T *) ptr;
-            *off =
+            offset_data->off = 
                 key_off + 2 + tsk_getu16(hfs->fs_info.endian,
                 cur_key->key_len);
         }
@@ -1129,11 +1162,13 @@ hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
 static TSK_OFF_T
 hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle)
 {
-    TSK_OFF_T off = 0;
-    if (hfs_cat_traverse(hfs, needle, hfs_cat_get_record_offset_cb, &off)) {
+    HFS_CAT_GET_RECORD_OFFSET_DATA offset_data;
+    offset_data.off = 0;
+    offset_data.targ_key = needle;
+    if (hfs_cat_traverse(hfs, hfs_cat_get_record_offset_cb, &offset_data)) {
         return 0;
     }
-    return off;
+    return offset_data.off;
 }
 
 
diff --git a/tsk/fs/hfs_dent.c b/tsk/fs/hfs_dent.c
index c322901c95cf1f16e8c1a9a98d68c6d09dde4651..c1a04e4eb3701f3ba3b9f83f8c7102822aa4680e 100644
--- a/tsk/fs/hfs_dent.c
+++ b/tsk/fs/hfs_dent.c
@@ -193,27 +193,27 @@ hfsmode2tsknametype(uint16_t a_mode)
 typedef struct {
     TSK_FS_DIR *fs_dir;
     TSK_FS_NAME *fs_name;
+    uint32_t cnid;
 } HFS_DIR_OPEN_META_INFO;
 
 static uint8_t
 hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
-    const void *targ_data, const hfs_btree_key_cat * cur_key,
+    const hfs_btree_key_cat * cur_key,
     TSK_OFF_T key_off, void *ptr)
 {
-    uint32_t *cnid_p = (uint32_t *) targ_data;
     HFS_DIR_OPEN_META_INFO *info = (HFS_DIR_OPEN_META_INFO *) ptr;
     TSK_FS_INFO *fs = &hfs->fs_info;
 
     if (tsk_verbose)
         fprintf(stderr,
             "hfs_dir_open_meta_cb: want %" PRIu32 " vs got %" PRIu32
-            " (%s node)\n", *cnid_p, tsk_getu32(hfs->fs_info.endian,
+            " (%s node)\n", info->cnid, tsk_getu32(hfs->fs_info.endian,
                 cur_key->parent_cnid),
             (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf");
 
     if (level_type == HFS_BT_NODE_TYPE_IDX) {
         if (tsk_getu32(hfs->fs_info.endian,
-                cur_key->parent_cnid) < *cnid_p) {
+                cur_key->parent_cnid) < info->cnid) {
             return HFS_BTREE_CB_IDX_LT;
         }
         else {
@@ -226,26 +226,14 @@ hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type,
         size_t rec_off2;
 
         if (tsk_getu32(hfs->fs_info.endian,
-                cur_key->parent_cnid) < *cnid_p) {
+                cur_key->parent_cnid) < info->cnid) {
             return HFS_BTREE_CB_LEAF_GO;
         }
         else if (tsk_getu32(hfs->fs_info.endian,
-                cur_key->parent_cnid) > *cnid_p) {
+                cur_key->parent_cnid) > info->cnid) {
             return HFS_BTREE_CB_LEAF_STOP;
         }
         rec_off2 = 2 + tsk_getu16(hfs->fs_info.endian, cur_key->key_len);
-        // @@@ NEED TO REPLACE THIS SOMEHOW, but need to figure out the max length
-        /*
-           if (rec_off2 > nodesize) {
-           tsk_error_set_errno(TSK_ERR_FS_GENFS);
-           tsk_error_set_errstr(
-           "hfs_dir_open_meta: offset of record+keylen %d in leaf node %d too large (%"PRIuSIZE" vs %"
-           PRIu16 ")", rec, cur_node, rec_off2, nodesize);
-           tsk_fs_name_free(fs_name);
-           free(node);
-           return TSK_COR;
-           }
-         */
         rec_type = tsk_getu16(hfs->fs_info.endian, &rec_buf[rec_off2]);
 
         // Catalog entry is for a file
@@ -488,7 +476,8 @@ hfs_dir_open_meta(TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir,
         }
     }
 
-    if (hfs_cat_traverse(hfs, &cnid, hfs_dir_open_meta_cb, &info)) {
+    info.cnid = cnid;
+    if (hfs_cat_traverse(hfs, hfs_dir_open_meta_cb, &info)) {
         tsk_fs_name_free(fs_name);
         return TSK_ERR;
     }
diff --git a/tsk/fs/tsk_hfs.h b/tsk/fs/tsk_hfs.h
index 9d64ccc789fc64c8e645880a7d65e5904cf38a4d..21dec3135a4e2ab9b0f6503f4c2d226a47efa2be 100644
--- a/tsk/fs/tsk_hfs.h
+++ b/tsk/fs/tsk_hfs.h
@@ -774,9 +774,16 @@ extern uint8_t hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum,
 extern void error_returned(char *errstr, ...);
 extern void error_detected(uint32_t errnum, char *errstr, ...);
 
+/**
+ * @param hfs File system being analyzed
+ * @param level_type Type of node the record is from (index or leaf)
+ * @param cur_key Key currently being analyzed (record data follows it)
+ * @param key_off Byte offset in the tree at which this key is located
+ * @param ptr Pointer to the data that was passed to hfs_cat_traverse()
+ */
 typedef uint8_t(*TSK_HFS_BTREE_CB) (HFS_INFO *, int8_t level_type,
-    const void *targ_key, const hfs_btree_key_cat * cur_key,
-    TSK_OFF_T key_off, void *);
+    const hfs_btree_key_cat * cur_key,
+    TSK_OFF_T key_off, void *ptr);
 // return values for callback
 #define HFS_BTREE_CB_IDX_LT     1       // current key is less than target (keeps looking in node)
 #define HFS_BTREE_CB_IDX_EQGT   2       // current key is equal or greater than target (stops)
@@ -784,7 +791,7 @@ typedef uint8_t(*TSK_HFS_BTREE_CB) (HFS_INFO *, int8_t level_type,
 #define HFS_BTREE_CB_LEAF_STOP  4       // stop processing keys in the leaf node
 #define HFS_BTREE_CB_ERR        5
 
-extern uint8_t hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data,
+extern uint8_t hfs_cat_traverse(HFS_INFO * hfs, 
     TSK_HFS_BTREE_CB a_cb, void *ptr);
 
 
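A minimal usage sketch of the revised hfs_cat_traverse() interface: the search target now travels in a caller-defined struct handed through the void *ptr argument instead of the removed targ_data parameter. This assumes the declarations above from tsk_hfs.h; the names example_cb, EXAMPLE_CB_DATA, wanted_cnid, found_off, and target_cnid are hypothetical and not part of this change.

    /* Hypothetical context struct: holds the search target and the result. */
    typedef struct {
        uint32_t wanted_cnid;   /* CNID the callback is looking for */
        TSK_OFF_T found_off;    /* set when a matching leaf record is seen */
    } EXAMPLE_CB_DATA;

    /* Matches the new TSK_HFS_BTREE_CB signature (no targ_data argument). */
    static uint8_t
    example_cb(HFS_INFO * hfs, int8_t level_type,
        const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *ptr)
    {
        EXAMPLE_CB_DATA *data = (EXAMPLE_CB_DATA *) ptr;
        uint32_t cur = tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid);

        if (level_type == HFS_BT_NODE_TYPE_IDX) {
            /* keep descending until the index key reaches the target */
            return (cur < data->wanted_cnid) ?
                HFS_BTREE_CB_IDX_LT : HFS_BTREE_CB_IDX_EQGT;
        }
        if (cur < data->wanted_cnid)
            return HFS_BTREE_CB_LEAF_GO;
        if (cur > data->wanted_cnid)
            return HFS_BTREE_CB_LEAF_STOP;
        data->found_off = key_off;      /* record where the match was found */
        return HFS_BTREE_CB_LEAF_GO;
    }

    /* Caller: all per-search state rides in the struct.
     *
     *     EXAMPLE_CB_DATA data = { target_cnid, 0 };
     *     if (hfs_cat_traverse(hfs, example_cb, &data))
     *         return 1;
     */
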
diff --git a/win32/BUILDING.txt b/win32/BUILDING.txt
index b6c87031a985e4b9efa76378673958638ee98c5a..5f050f92c9d4e1078b7df37537eb75bd84bc9928 100755
--- a/win32/BUILDING.txt
+++ b/win32/BUILDING.txt
@@ -14,9 +14,15 @@ The SDK installation may fail, particularly if Visual Studio 2010 Service Pack 1
 
 
 
-The Visual Studio Solution file has five build targets: Debug, Debug_NoLibs, Release, Debug_PostgreSQL, and Release_PostgreSQL. Debug and Release require that libewf exists (to provide support for E01 image files) and that zlib exists (to provide support for HFS+ compressed data). Debug_NoLibs does not require libewf or zlib and you should be able to compile Debug_NoLibs without any additional setup. Debug_PostgreSQL and Release_PostgreSQL require a 64-bit version of PostgreSQL 9.4 or above installed. See the PostgreSQL section below.
+There are six build targets:
+- Debug_NoLibs and Release_NoLibs do not depend on any third-party libraries.
+- Debug and Release depend on libewf (for E01 image support) and zlib (for HFS+ compressed data support) being built.
+- Debug_PostgreSQL and Release_PostgreSQL depend on libewf (for E01 image support) and the PostgreSQL libraries.  These targets are needed by Autopsy and other programs that want to write database results to a central PostgreSQL database instead of just SQLite.
 
 
+------------------------------------------------------------------------
+Debug and Release Targets
+
 
 The steps below outline the process required to compile the Debug and Release targets.
 
@@ -46,35 +52,26 @@ If you want to build 64-bit libraries though, download a version that we've upgr
 
 
 
-The steps below outline the process required to compile the Debug_PostgreSQL and Release_PostgreSQL targets.
-
-1) Download libewf-20130128 (or later).  The official releases are from:
-    http://sourceforge.net/projects/libewf/
-
-If you want to build 64-bit libraries though, download a version that we've upgraded:
-    https://github.com/sleuthkit/libewf_64bit
+------------------------------------------------------------------------
+PostgreSQL Targets
 
+The steps below outline the process required to compile the Debug_PostgreSQL and Release_PostgreSQL targets.
 
-2) Open archive file and follow the README instructions in libewf to build libewf_dll (at the time of this writing, that includes downloading the zlib dll). Note that TSK will use only the Release version of libewf_dll.  Later steps also depend on the zlib dll being built inside of libewf.  Note that libewf will need to be converted to Visual Studio 2010 and be upgraded to support a 64-bit build.
-
-
-3) Set the LIBEWF_HOME environment variable to point to the libewf folder that you created and built in step 2. 
-
-4) If you want to build libtsk_jni for the Java JNI bindings, then set the JDK_HOME environment variable to point to the top directory of your Java SDK.
+1) Follow all of the steps outlined above that are required to build the Release and Debug targets.
 
-5) Download and install PostgreSQL 9.4 or above. The official releases are from:
+2) Download and install PostgreSQL 9.4 or above. You can either download the full installer or just the ZIP file. The official releases are from:
     http://www.postgresql.org/download/
     
-6) Set the POSTGRESQL_HOME_64 environment variable to point to the PostgreSQL folder containing, but not including, the bin folder.
+3) Set the POSTGRESQL_HOME_64 environment variable to point to the PostgreSQL folder containing, but not including, the bin folder.
    Example: POSTGRESQL_HOME_64=C:\Program Files\PostgreSQL\9.4 
 
-7) Open the TSK Visual Studio Solution file, tsk-win.sln, in the win32 directory. 
+4) Open the TSK Visual Studio Solution file, tsk-win.sln, in the win32 directory. 
 
-8) Compile a Debug_PostgreSQL x64 or Release_PostgreSQL x64 version of the libraries and executables.  The resulting libraries and executables will be put in win32/x64/Debug_PostgreSQL and win32/x64/Release_PostgreSQL folders as appropriate. You can change the type of build using the pulldown in Visual Studio.
+5) Compile a Debug_PostgreSQL x64 or Release_PostgreSQL x64 version of the libraries and executables.  The resulting libraries and executables will be put in win32/x64/Debug_PostgreSQL and win32/x64/Release_PostgreSQL folders as appropriate. You can change the type of build using the pulldown in Visual Studio.
 
-9) Note that the libraries and executables will depend on the libewf, zlib, libpq, libintl-8, libeay32, and ssleay32 DLL files (which are copied to the TSK build directories). 
+6) Note that the libraries and executables will depend on the libewf, zlib, libpq, libintl-8, libeay32, and ssleay32 DLL files (which are copied to the TSK build directories). 
 
-10) If you are using JNI along with the PostgreSQL build, in NetBeans build the DataModel PostgreSQL target or in ant 'ant PostgreSQL'.
+7) If you are using JNI along with the PostgreSQL build, build the DataModel PostgreSQL target in NetBeans or run 'ant PostgreSQL' with ant.
 
 Refer to the API docs at http://sleuthkit.org/sleuthkit/docs/api-docs/ for details on how to use the library in an application.