Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • irt/sleuthkit
1 result
Show changes
Showing
with 5321 additions and 0 deletions
/*
* Sleuth Kit Data Model
*
* Copyright 2014-2018 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import static java.nio.file.StandardOpenOption.READ;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.sleuthkit.datamodel.Blackboard.BlackboardException;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
/**
 * Represents a report that has been added to a case. A report is backed
 * either by a local file (absolute path stored) or by a URL (any path
 * starting with "http"); URL-backed reports have no readable content.
 * Implements the Content interface, but only supports keyword hit (and,
 * for data artifacts, account) artifacts.
 */
public class Report implements Content {

	private static final BlackboardArtifact.Type KEYWORD_HIT_TYPE = new BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT);

	// Sentinel object id used before a report row has been assigned a key.
	static long ID_NOT_SET = -1;

	private long objectId = ID_NOT_SET;
	private final String pathAsString;
	private final Path pathAsPath; // NULL if path is for a URL
	private final long createdTime;
	private final String sourceModuleName;
	private final String reportName;
	private Content parent; // The object from which the report was generated; lazily loaded by getParent().
	private final SleuthkitCase db; // A reference to the database instance.
	private FileChannel fileChannel = null; // Used to read report content; opened lazily by read(), released by close().
	private static final Logger LOGGER = Logger.getLogger(Report.class.getName());

	/**
	 * Create a Report instance.
	 *
	 * @param db               The case database.
	 * @param id               Primary key from associated row in the case database.
	 * @param path             Absolute path to report, or a URL starting with "http".
	 * @param createdTime      Created time of report (in UNIX epoch time).
	 * @param sourceModuleName Name of the module that generated the report.
	 * @param reportName       May be empty.
	 * @param parent           The parent/source of the Report, may be null.
	 */
	Report(SleuthkitCase db, long id, String path, long createdTime, String sourceModuleName, String reportName, Content parent) {
		this.db = db;
		this.objectId = id;
		this.pathAsString = path;
		if (path.startsWith("http")) {
			// URL-backed report: there is no local file to read.
			this.pathAsPath = null;
		} else {
			this.pathAsPath = Paths.get(path);
		}
		this.createdTime = createdTime;
		this.sourceModuleName = sourceModuleName;
		this.reportName = reportName;
		this.parent = parent;
	}

	@Override
	public long getId() {
		return objectId;
	}

	/**
	 * Get the absolute local path (or URL) of the report.
	 *
	 * @return The report path.
	 */
	public String getPath() {
		return (pathAsPath != null ? pathAsPath.toString() : pathAsString);
	}

	/**
	 * Get the creation date of the report.
	 *
	 * @return Number of seconds since Jan 1, 1970.
	 */
	public long getCreatedTime() {
		return createdTime;
	}

	/**
	 * Get the name of the module (e.g., ingest module, reporting module) that
	 * generated the report.
	 *
	 * @return The module name.
	 */
	public String getSourceModuleName() {
		return this.sourceModuleName;
	}

	/**
	 * Get the report name, if any.
	 *
	 * @return The name of the report, possibly empty.
	 */
	public String getReportName() {
		return reportName;
	}

	/**
	 * Reads report content into the given buffer, starting at the given
	 * offset within the report file.
	 *
	 * @param buf    Destination buffer; up to buf.length bytes are read.
	 * @param offset Byte offset into the report file to start reading from.
	 * @param len    Ignored; the buffer capacity bounds the read.
	 *
	 * @return Number of bytes read; 0 for URL-backed reports, directories,
	 *         or on I/O error (the error is logged, not thrown).
	 */
	@Override
	public int read(byte[] buf, long offset, long len) throws TskCoreException {
		// URL-backed reports and directories have no readable content.
		if (pathAsPath == null || Files.isDirectory(pathAsPath)) {
			return 0;
		}
		int totalBytesRead = 0;
		ByteBuffer data = ByteBuffer.wrap(buf);
		try {
			if (fileChannel == null) {
				fileChannel = FileChannel.open(pathAsPath, READ);
			}
			fileChannel.position(offset);
			int bytesRead = 0;
			do {
				bytesRead = fileChannel.read(data);
				if (bytesRead != -1) {
					totalBytesRead += bytesRead;
				}
			} while (bytesRead != -1 && data.hasRemaining());
		} catch (IOException ex) {
			LOGGER.log(Level.SEVERE, "Failed to read report file.", ex);
		}
		return totalBytesRead;
	}

	@Override
	public void close() {
		try {
			if (fileChannel != null) {
				fileChannel.close();
			}
		} catch (IOException ex) {
			LOGGER.log(Level.WARNING, "Failed to close report file.", ex);
		} finally {
			// Drop the reference so a later read() reopens the channel
			// instead of failing on a closed one.
			fileChannel = null;
		}
	}

	@Override
	public long getSize() {
		try {
			return (pathAsPath != null ? Files.size(pathAsPath) : 0);
		} catch (IOException ex) {
			LOGGER.log(Level.SEVERE, "Failed to get size of report.", ex);
			// If we cannot determine the size of the report, return zero
			// to prevent attempts to read content.
			return 0;
		}
	}

	@Override
	public <T> T accept(ContentVisitor<T> v) {
		return v.visit(this);
	}

	@Override
	public String getName() {
		return reportName;
	}

	@Override
	public String getUniquePath() throws TskCoreException {
		// @@@ This is wrong... we need to use the same logic is in AbstractContent.getUniquePath().
		return getPath();
	}

	@Override
	public Content getDataSource() throws TskCoreException {
		if (null == parent) {
			return null;
		} else {
			return parent.getDataSource();
		}
	}

	@Override
	public List<Content> getChildren() throws TskCoreException {
		// Reports never have children.
		return Collections.<Content>emptyList();
	}

	@Override
	public boolean hasChildren() throws TskCoreException {
		return false;
	}

	@Override
	public int getChildrenCount() throws TskCoreException {
		return 0;
	}

	@Override
	public Content getParent() throws TskCoreException {
		// Lazily load the parent from the database on first access.
		if (parent == null) {
			SleuthkitCase.ObjectInfo parentInfo;
			parentInfo = db.getParentInfo(this);
			if (parentInfo == null) {
				parent = null;
			} else {
				parent = db.getContentById(parentInfo.getId());
			}
		}
		return parent;
	}

	@Override
	public List<Long> getChildrenIds() throws TskCoreException {
		return Collections.<Long>emptyList();
	}

	/**
	 * @deprecated Use newAnalysisResult/newDataArtifact instead.
	 */
	@Deprecated
	@Override
	public BlackboardArtifact newArtifact(int artifactTypeID) throws TskCoreException {
		if (artifactTypeID != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
			throw new TskCoreException("Reports can only have keyword hit artifacts.");
		}
		long fileObjId = getId();
		// Look up the data source once and keep the id boxed: a report without
		// a data source yields null here instead of an unboxing NPE.
		// NOTE(review): assumes Blackboard.newAnalysisResult accepts a null
		// data source object id — confirm against its signature.
		Content dataSource = getDataSource();
		Long dsObjId = (dataSource == null) ? null : dataSource.getId();
		try {
			return db.getBlackboard().newAnalysisResult(
					KEYWORD_HIT_TYPE, fileObjId, dsObjId, Score.SCORE_UNKNOWN,
					null, null, null, Collections.emptyList())
					.getAnalysisResult();
		} catch (BlackboardException ex) {
			throw new TskCoreException("Unable to get analysis result for keyword hit.", ex);
		}
	}

	@Override
	public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactType, Score score, String conclusion, String configuration, String justification, Collection<BlackboardAttribute> attributesList) throws TskCoreException {
		// Get the data source before opening the transaction.
		// NOTE(review): this NPEs if the report has no data source — confirm
		// callers guarantee one, or guard as in newArtifact(int).
		long dataSourceObjId = getDataSource().getId();
		CaseDbTransaction trans = db.beginTransaction();
		try {
			AnalysisResultAdded resultAdded = db.getBlackboard().newAnalysisResult(artifactType, objectId, dataSourceObjId, score, conclusion, configuration, justification, attributesList, trans);
			trans.commit();
			return resultAdded;
		} catch (BlackboardException ex) {
			trans.rollback();
			throw new TskCoreException("Error adding analysis result.", ex);
		}
	}

	@Override
	public AnalysisResultAdded newAnalysisResult(BlackboardArtifact.Type artifactType, Score score, String conclusion, String configuration, String justification, Collection<BlackboardAttribute> attributesList, long dataSourceId) throws TskCoreException {
		CaseDbTransaction trans = db.beginTransaction();
		try {
			AnalysisResultAdded resultAdded = db.getBlackboard().newAnalysisResult(artifactType, objectId, dataSourceId, score, conclusion, configuration, justification, attributesList, trans);
			trans.commit();
			return resultAdded;
		} catch (BlackboardException ex) {
			trans.rollback();
			throw new TskCoreException("Error adding analysis result.", ex);
		}
	}

	@Override
	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId) throws TskCoreException {
		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() &&
				artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
			// Message matches the condition above: both types are permitted.
			throw new TskCoreException("Reports can only have keyword hit and account artifacts.");
		}
		return db.getBlackboard().newDataArtifact(artifactType, objectId, this.getDataSource().getId(), attributesList, osAccountId);
	}

	@Override
	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList, Long osAccountId, long dataSourceId) throws TskCoreException {
		if (artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() &&
				artifactType.getTypeID() != BlackboardArtifact.ARTIFACT_TYPE.TSK_ACCOUNT.getTypeID()) {
			// Message matches the condition above: both types are permitted.
			throw new TskCoreException("Reports can only have keyword hit and account artifacts.");
		}
		return db.getBlackboard().newDataArtifact(artifactType, objectId, dataSourceId, attributesList, osAccountId);
	}

	@Override
	public DataArtifact newDataArtifact(BlackboardArtifact.Type artifactType, Collection<BlackboardAttribute> attributesList) throws TskCoreException {
		return newDataArtifact(artifactType, attributesList, null);
	}

	/**
	 * @deprecated Use newAnalysisResult/newDataArtifact instead.
	 */
	@Deprecated
	@SuppressWarnings("deprecation")
	@Override
	public BlackboardArtifact newArtifact(BlackboardArtifact.ARTIFACT_TYPE type) throws TskCoreException {
		return newArtifact(type.getTypeID());
	}

	@Override
	public ArrayList<BlackboardArtifact> getArtifacts(String artifactTypeName) throws TskCoreException {
		return getArtifacts(db.getBlackboard().getArtifactType(artifactTypeName).getTypeID());
	}

	@Override
	public BlackboardArtifact getGenInfoArtifact() throws TskCoreException {
		// TSK_GEN_INFO artifact is obsolete.
		return null;
	}

	@Override
	public BlackboardArtifact getGenInfoArtifact(boolean create) throws TskCoreException {
		// TSK_GEN_INFO artifact is obsolete.
		return null;
	}

	@Override
	public ArrayList<BlackboardAttribute> getGenInfoAttributes(BlackboardAttribute.ATTRIBUTE_TYPE attr_type) throws TskCoreException {
		// TSK_GEN_INFO artifact is obsolete.
		return null;
	}

	@Override
	public ArrayList<BlackboardArtifact> getArtifacts(int artifactTypeID) throws TskCoreException {
		if (artifactTypeID != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
			throw new TskCoreException("Reports can only have keyword hit artifacts.");
		}
		return db.getBlackboardArtifacts(artifactTypeID, objectId);
	}

	@Override
	public ArrayList<BlackboardArtifact> getArtifacts(BlackboardArtifact.ARTIFACT_TYPE type) throws TskCoreException {
		return getArtifacts(type.getTypeID());
	}

	@Override
	public ArrayList<BlackboardArtifact> getAllArtifacts() throws TskCoreException {
		return db.getMatchingArtifacts("WHERE obj_id = " + objectId); //NON-NLS
	}

	@Override
	public List<AnalysisResult> getAllAnalysisResults() throws TskCoreException {
		return db.getBlackboard().getAnalysisResults(objectId);
	}

	@Override
	public List<DataArtifact> getAllDataArtifacts() throws TskCoreException {
		return db.getBlackboard().getDataArtifactsBySource(objectId);
	}

	@Override
	public List<AnalysisResult> getAnalysisResults(BlackboardArtifact.Type artifactType) throws TskCoreException {
		return db.getBlackboard().getAnalysisResults(objectId, artifactType.getTypeID());
	}

	@Override
	public Score getAggregateScore() throws TskCoreException {
		return db.getScoringManager().getAggregateScore(objectId);
	}

	@Override
	public Set<String> getHashSetNames() throws TskCoreException {
		// Reports are never hashed into hash sets.
		return Collections.<String>emptySet();
	}

	@Override
	public long getArtifactsCount(String artifactTypeName) throws TskCoreException {
		return getArtifactsCount(db.getBlackboard().getArtifactType(artifactTypeName).getTypeID());
	}

	@Override
	public long getArtifactsCount(int artifactTypeID) throws TskCoreException {
		if (artifactTypeID != BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID()) {
			throw new TskCoreException("Reports can only have keyword hit artifacts.");
		}
		return db.getBlackboardArtifactsCount(artifactTypeID, objectId);
	}

	@Override
	public long getArtifactsCount(BlackboardArtifact.ARTIFACT_TYPE type) throws TskCoreException {
		return getArtifactsCount(type.getTypeID());
	}

	@Override
	public long getAllArtifactsCount() throws TskCoreException {
		return db.getBlackboardArtifactsCount(objectId);
	}

	@Override
	public <T> T accept(SleuthkitItemVisitor<T> v) {
		return v.visit(this);
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2020 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
/**
 * Interface for classes to help create queries for SQLite or PostgreSQL.
 */
interface SQLHelper {

	/**
	 * @return The column type to use for primary keys.
	 */
	String getPrimaryKey();

	/**
	 * @return The column type to use for big int-type data.
	 */
	String getBigIntType();

	/**
	 * @return The column type to use for blob-type data.
	 */
	String getBlobType();

	/**
	 * @return The name of the description column in the tsk_vs_parts table.
	 *         This varies between SQLite and PostgreSQL.
	 */
	String getVSDescColName();

	/**
	 * SQLite-specific implementation.
	 */
	class SQLiteHelper implements SQLHelper {

		@Override
		public String getPrimaryKey() {
			return "INTEGER";
		}

		@Override
		public String getBigIntType() {
			return "INTEGER";
		}

		@Override
		public String getBlobType() {
			return "BLOB";
		}

		@Override
		public String getVSDescColName() {
			return "desc";
		}
	}

	/**
	 * PostgreSQL-specific implementation.
	 */
	class PostgreSQLHelper implements SQLHelper {

		@Override
		public String getPrimaryKey() {
			return "BIGSERIAL";
		}

		@Override
		public String getBigIntType() {
			return "BIGINT";
		}

		@Override
		public String getBlobType() {
			return "BYTEA";
		}

		@Override
		public String getVSDescColName() {
			return "descr";
		}
	}
}
\ No newline at end of file
/*
* Sleuth Kit Data Model
*
* Copyright 2020-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.util.Arrays;
import java.util.Comparator;
import java.util.ResourceBundle;
/**
* Encapsulates either an analysis result score or the aggregate score of
* Content. A score measures how likely the Content object is to be relevant to
* an investigation. Relevance is determined by a series of analysis techniques,
* each of which has a score. The aggregate score for an item is then determined
* based on its analysis results.
*
* A score has two primary fields: Significance and Priority.
*
* There are two priorities : Normal and Override. Nearly everything should have
* a "Normal" priority. "Override" is used when a user wants to change the score
* because of a false positive. An "Override" score will take priority over
* the combined "Normal" scores. An item should have only one "Override" score
* at a time, but that is not currently enforced.
*
* The significance is a range of how Notable (i.e. "Bad") the item is. The
* range is from NONE (i.e. "Good") to NOTABLE with values in the middle, such
* as LIKELY_NOTABLE for suspicious items. The LIKELY_ values are used when
* there is less confidence in the result. The significance has to do with the
* false positive rate at actually detecting notable or benign things.
*
*
* For an example, if a file is found in a MD5 hashset of notable files, then a
* module would use a significance of NOTABLE. This is because the MD5 is exact
* match and the hash set is all notable files.
*
* For a keyword hit, the significance would be LIKELY_NOTABLE because keywords
* often can be used in both good and bad ways. A user will need to review the
* file to determine if it is a true or false positive.
*
* If a file is found to be on a good list (via MD5), then it could have a
* significance of NONE and then other modules could ignore it.
*
* An aggregate score is the combination of the specific analysis results.
* USER_RESULTS will overrule NORMAL. NOTABLE overrules NONE. Both of those
* overrule the LIKELY_* results.
*
* NOTABLE > NONE > LIKELY_NOTABLE > LIKELY_NONE > UNKNOWN
*/
public class Score implements Comparable<Score> {

	// Loaded at class initialization; supplies translated display names.
	private static final ResourceBundle bundle = ResourceBundle.getBundle("org.sleuthkit.datamodel.Bundle");

	/**
	 * Indicates the relevance of an item based on the analysis result's conclusion.
	 *
	 * For comparing significance, the following ordering applies
	 *
	 * Bad > Good > Likely Bad > Likely Good > Unknown
	 *
	 */
	public enum Significance {

		// Enum name must not have any spaces.

		/* Notes on the ordinal numbers: We defined these so that we could easily
		 * compare values while also have some concept of grouping.
		 * The 1x values are a higher confidence than the 0x files.
		 * NOTABLE (x9) has priority over NOT NOTABLE (x8).
		 * If we need to make this more complicated in the future, we can add
		 * other groupings, such as 14 and 15.
		 */
		/// no significance assigned yet.
		UNKNOWN(0, "Unknown", "Significance.Unknown.displayName.text"),
		/// likely good
		LIKELY_NONE(8, "LikelyNone", "Significance.LikelyNone.displayName.text"),
		/// likely bad, suspicious
		LIKELY_NOTABLE(9, "LikelyNotable", "Significance.LikelyNotable.displayName.text"),
		/// good
		NONE(18, "None", "Significance.None.displayName.text"),
		/// bad
		NOTABLE(19, "Notable", "Significance.Notable.displayName.text");

		private final int id;
		private final String name; // name must not have spaces
		private final String displayNameKey; // display name is loaded from resource bundle using this key.

		private Significance(int id, String name, String displayNameKey) {
			this.id = id;
			this.name = name;
			this.displayNameKey = displayNameKey;
		}

		/**
		 * Looks up a Significance by its non-localized name.
		 *
		 * @param name Name to match (case-sensitive).
		 *
		 * @return Matching value, or NONE if no value matches.
		 */
		public static Significance fromString(String name) {
			return Arrays.stream(values())
					.filter(val -> val.getName().equals(name))
					.findFirst().orElse(NONE);
		}

		/**
		 * Looks up a Significance by its id.
		 *
		 * @param id Id to match.
		 *
		 * @return Matching value, or NONE if no value matches.
		 */
		static public Significance fromID(int id) {
			return Arrays.stream(values())
					.filter(val -> val.getId() == id)
					.findFirst().orElse(NONE);
		}

		/**
		 * Get enum ordinal.
		 *
		 * @return Ordinal.
		 */
		public int getId() {
			return id;
		}

		/**
		 * Gets name that has no spaces in it.
		 * Does not get translated.
		 *
		 * @return Name.
		 */
		public String getName() {
			return name;
		}

		/**
		 * Gets display name that may have spaces and can be used in the UI.
		 * May return a translated version.
		 *
		 * @return Display name.
		 */
		public String getDisplayName() {
			return bundle.getString(displayNameKey);
		}

		@Override
		public String toString() {
			return name;
		}
	}

	/**
	 * Represents the priority of the score to allow overrides by a user or module.
	 */
	public enum Priority {

		// Name must not have any spaces.
		NORMAL(0, "Normal", "Score.Priority.Normal.displayName.text"),
		OVERRIDE(10, "Override", "Score.Priority.Override.displayName.text");

		private final int id;
		private final String name;
		private final String displayNameKey; // display name is loaded from resource bundle using this key.

		private Priority(int id, String name, String displayNameKey) {
			this.id = id;
			this.name = name;
			this.displayNameKey = displayNameKey;
		}

		/**
		 * Looks up a Priority by its non-localized name.
		 *
		 * @param name Name to match (case-sensitive).
		 *
		 * @return Matching value, or NORMAL if no value matches.
		 */
		public static Priority fromString(String name) {
			return Arrays.stream(values())
					.filter(val -> val.getName().equals(name))
					.findFirst().orElse(NORMAL);
		}

		/**
		 * Looks up a Priority by its id.
		 *
		 * @param id Id to match.
		 *
		 * @return Matching value, or NORMAL if no value matches.
		 */
		static public Priority fromID(int id) {
			return Arrays.stream(values())
					.filter(val -> val.getId() == id)
					.findFirst().orElse(NORMAL);
		}

		public int getId() {
			return id;
		}

		public String getName() {
			return name;
		}

		public String getDisplayName() {
			return bundle.getString(displayNameKey);
		}

		@Override
		public String toString() {
			return name;
		}
	}

	// Commonly used scores, all at NORMAL priority.
	public static final Score SCORE_NOTABLE = new Score(Significance.NOTABLE, Priority.NORMAL);
	public static final Score SCORE_LIKELY_NOTABLE = new Score(Significance.LIKELY_NOTABLE, Priority.NORMAL);
	public static final Score SCORE_LIKELY_NONE = new Score(Significance.LIKELY_NONE, Priority.NORMAL);
	public static final Score SCORE_NONE = new Score(Significance.NONE, Priority.NORMAL);
	public static final Score SCORE_UNKNOWN = new Score(Significance.UNKNOWN, Priority.NORMAL);

	// Score is a combination of significance and priority.
	private final Significance significance;
	private final Priority priority;

	public Score(Significance significance, Priority priority) {
		this.significance = significance;
		this.priority = priority;
	}

	public Significance getSignificance() {
		return significance;
	}

	public Priority getPriority() {
		return priority;
	}

	// NOTE(review): Score does not override equals/hashCode, so compareTo is
	// not consistent with equals; callers compare via compareTo == 0.
	@Override
	public int compareTo(Score other) {
		// A score is a combination of significance & priority.
		// Priority Override overrides Normal.
		// If two results have same priority, then the higher significance wins.
		if (this.getPriority() != other.getPriority()) {
			return Integer.compare(this.getPriority().ordinal(), other.getPriority().ordinal());
		} else {
			return Integer.compare(this.getSignificance().ordinal(), other.getSignificance().ordinal());
		}
	}

	/**
	 * @return A comparator that orders scores by their natural ordering
	 *         (compareTo above).
	 */
	public static Comparator<Score> getScoreComparator() {
		return Comparator.naturalOrder();
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2020-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.util.Optional;
/**
 * Records a single change to the aggregate score of an object: the object's
 * id, the data source it belongs to, and the scores before and after the
 * change.
 */
final public class ScoreChange {

	private final long changedObjId;
	private final Long dsObjId;
	private final Score scoreBefore;
	private final Score scoreAfter;

	/**
	 * Constructs a score change record.
	 *
	 * @param objId              Object id of the item whose score changed.
	 * @param dataSourceObjectId Object id of the data source, may be null.
	 * @param oldScore           Score before the change.
	 * @param newScore           Score after the change.
	 */
	ScoreChange(long objId, Long dataSourceObjectId, Score oldScore, Score newScore) {
		this.changedObjId = objId;
		this.dsObjId = dataSourceObjectId;
		this.scoreBefore = oldScore;
		this.scoreAfter = newScore;
	}

	/**
	 * @return Object id of the data source, may be null.
	 */
	public Long getDataSourceObjectId() {
		return dsObjId;
	}

	/**
	 * @return Object id of the item whose score changed.
	 */
	public long getObjectId() {
		return changedObjId;
	}

	/**
	 * @return Score prior to the change.
	 */
	public Score getOldScore() {
		return scoreBefore;
	}

	/**
	 * @return Score after the change.
	 */
	public Score getNewScore() {
		return scoreAfter;
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2020 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import org.sleuthkit.datamodel.Score.Priority;
import org.sleuthkit.datamodel.Score.Significance;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
/**
 * The scoring manager is responsible for updating and querying the score of
 * objects.
 *
 */
public class ScoringManager {

	private static final Logger LOGGER = Logger.getLogger(ScoringManager.class.getName());

	// Case database whose tsk_aggregate_score table this manager reads and updates.
	private final SleuthkitCase db;

	/**
	 * Construct a ScoringManager for the given SleuthkitCase.
	 *
	 * @param skCase The SleuthkitCase
	 *
	 */
	ScoringManager(SleuthkitCase skCase) {
		this.db = skCase;
	}
	/**
	 * Get the aggregate score for the given object.
	 *
	 * @param objId Object id.
	 *
	 * @return Score, if it is found, unknown otherwise.
	 *
	 * @throws TskCoreException
	 */
	public Score getAggregateScore(long objId) throws TskCoreException {
		db.acquireSingleUserCaseReadLock();
		try (CaseDbConnection connection = db.getConnection()) {
			// Delegate to the connection-based overload; the connection is
			// closed by try-with-resources before the lock is released.
			return getAggregateScore(objId, connection);
		} finally {
			db.releaseSingleUserCaseReadLock();
		}
	}
/**
* Get the aggregate scores for the given list of object ids.
*
* @param objIds Object id list.
*
* @return Map<Long, Score> Each input object id will be mapped. If a score
* is not found for an object Unknown score will be mapped.
*
* @throws TskCoreException
*/
public Map<Long, Score> getAggregateScores(List<Long> objIds) throws TskCoreException {
if (objIds.isEmpty()) {
return Collections.emptyMap();
}
// We need to deduplicate the list of object IDs. Otherwise the map
// below breaks and throws an exception.
Set<Long> set = new HashSet<>(objIds);
String queryString = "SELECT obj_id, significance, priority FROM tsk_aggregate_score WHERE obj_id in "
+ set.stream().map(l -> l.toString()).collect(Collectors.joining(",", "(", ")"));
Map<Long, Score> results = set.stream().collect(Collectors.toMap( key -> key, key -> Score.SCORE_UNKNOWN));
db.acquireSingleUserCaseReadLock();
try (CaseDbConnection connection = db.getConnection()) {
try (Statement s = connection.createStatement(); ResultSet rs = connection.executeQuery(s, queryString)) {
while (rs.next()) {
Long objId = rs.getLong("obj_id");
Score score = new Score(Significance.fromID(rs.getInt("significance")), Priority.fromID(rs.getInt("priority")));
results.put(objId, score);
}
} catch (SQLException ex) {
throw new TskCoreException("SQLException thrown while running query: " + queryString, ex);
}
} finally {
db.releaseSingleUserCaseReadLock();
}
return results;
}
/**
* Get the aggregate score for the given object. Uses the connection from the
* given transaction.
*
* @param objId Object id.
* @param transaction Transaction that provides the connection to use.
*
* @return Score, if it is found, unknown otherwise.
*
* @throws TskCoreException
*/
private Score getAggregateScore(long objId, CaseDbTransaction transaction) throws TskCoreException {
CaseDbConnection connection = transaction.getConnection();
return getAggregateScore(objId, connection);
}
/**
* Get the aggregate score for the given object.
*
* @param objId Object id.
* @param connection Connection to use for the query.
*
* @return Score, if it is found, SCORE_UNKNOWN otherwise.
*
* @throws TskCoreException
*/
private Score getAggregateScore(long objId, CaseDbConnection connection) throws TskCoreException {
String queryString = "SELECT significance, priority FROM tsk_aggregate_score WHERE obj_id = " + objId;
try (Statement s = connection.createStatement(); ResultSet rs = connection.executeQuery(s, queryString)) {
if (rs.next()) {
return new Score(Significance.fromID(rs.getInt("significance")), Priority.fromID(rs.getInt("priority")));
} else {
return Score.SCORE_UNKNOWN;
}
} catch (SQLException ex) {
throw new TskCoreException("SQLException thrown while running query: " + queryString, ex);
}
}
/**
* Inserts or updates the score for the given object.
*
* @param objId Object id of the object.
* @param dataSourceObjectId Data source object id, may be null.
* @param score Score to be inserted/updated.
* @param transaction Transaction to use for the update.
*
* @throws TskCoreException
*/
private void setAggregateScore(long objId, Long dataSourceObjectId, Score score, CaseDbTransaction transaction) throws TskCoreException {
String insertSQLString = "INSERT INTO tsk_aggregate_score (obj_id, data_source_obj_id, significance , priority) VALUES (?, ?, ?, ?)"
+ " ON CONFLICT (obj_id) DO UPDATE SET significance = ?, priority = ?";
CaseDbConnection connection = transaction.getConnection();
try {
PreparedStatement preparedStatement = connection.getPreparedStatement(insertSQLString, Statement.NO_GENERATED_KEYS);
preparedStatement.clearParameters();
preparedStatement.setLong(1, objId);
if (dataSourceObjectId != null) {
preparedStatement.setLong(2, dataSourceObjectId);
} else {
preparedStatement.setNull(2, java.sql.Types.NULL);
}
preparedStatement.setInt(3, score.getSignificance().getId());
preparedStatement.setInt(4, score.getPriority().getId());
preparedStatement.setInt(5, score.getSignificance().getId());
preparedStatement.setInt(6, score.getPriority().getId());
connection.executeUpdate(preparedStatement);
} catch (SQLException ex) {
throw new TskCoreException(String.format("Error updating aggregate score, query: %s for objId = %d", insertSQLString, objId), ex);//NON-NLS
}
}
/**
* Updates the score for the specified object after a result has been
* added. Is optimized to do nothing if the new score is less than the
* current aggregate score.
*
* @param objId Object id.
* @param dataSourceObjectId Object id of the data source, may be null.
* @param newResultScore Score for a newly added analysis result.
* @param transaction Transaction to use for the update.
*
* @return Aggregate score for the object.
*
* @throws TskCoreException
*/
Score updateAggregateScoreAfterAddition(long objId, Long dataSourceObjectId, Score newResultScore, CaseDbTransaction transaction) throws TskCoreException {

	/*
	 * Take an exclusive write lock on the aggregate score table before any
	 * reads, so that no other writer can race this read-modify-write cycle
	 * and leave the aggregate score stale.
	 *
	 * NOTE: The alternative design is to add a 'version' column for
	 * opportunistic locking and calculate these outside of a transaction. We
	 * opted for table locking for performance reasons so that we can still
	 * add the analysis results in a batch. That remains an option if we get
	 * into deadlocks with the current design.
	 */
	try {
		transaction.getConnection().getAggregateScoreTableWriteLock();
	} catch (SQLException ex) {
		throw new TskCoreException("Error getting exclusive write lock on aggregate score table", ex);//NON-NLS
	}

	// Read the aggregate score currently recorded for the object.
	Score currentAggregateScore = ScoringManager.this.getAggregateScore(objId, transaction);

	// The new result replaces the aggregate when it either (a) supplies a
	// known score where only Unknown was recorded — this allows None (good)
	// to be stored — or (b) outranks the current aggregate.
	boolean fillsInUnknown = currentAggregateScore.compareTo(Score.SCORE_UNKNOWN) == 0
			&& newResultScore.compareTo(Score.SCORE_UNKNOWN) != 0;
	boolean outranksCurrent = Score.getScoreComparator().compare(newResultScore, currentAggregateScore) > 0;

	if (!fillsInUnknown && !outranksCurrent) {
		// No change; keep and return the existing aggregate score.
		return currentAggregateScore;
	}

	setAggregateScore(objId, dataSourceObjectId, newResultScore, transaction);
	// Register the score change so an event can be fired for it on commit.
	transaction.registerScoreChange(new ScoreChange(objId, dataSourceObjectId, currentAggregateScore, newResultScore));
	return newResultScore;
}
/**
 * Recomputes the aggregate score for a piece of content after one of its
 * analysis results was deleted.
 *
 * @param objId              Object id of the content that had a result
 *                           deleted.
 * @param dataSourceObjectId Object id of the data source the content is in.
 * @param transaction        Transaction to use for the update.
 *
 * @return The recalculated aggregate score.
 *
 * @throws TskCoreException
 */
Score updateAggregateScoreAfterDeletion(long objId, Long dataSourceObjectId, CaseDbTransaction transaction) throws TskCoreException {

	CaseDbConnection connection = transaction.getConnection();

	/*
	 * Take an exclusive write lock on the aggregate score table before any
	 * reads, so that no other writer can race this read-modify-write cycle
	 * and leave the aggregate score incorrect.
	 *
	 * NOTE: The alternative design is to add a 'version' column for
	 * opportunistic locking and calculate these outside of a transaction. We
	 * opted for table locking for performance reasons so that we can still
	 * add the analysis results in a batch. That remains an option if we get
	 * into deadlocks with the current design.
	 */
	try {
		connection.getAggregateScoreTableWriteLock();
	} catch (SQLException ex) {
		throw new TskCoreException("Error getting exclusive write lock on aggregate score table", ex);//NON-NLS
	}

	// Read the aggregate score currently recorded for the object.
	Score priorScore = ScoringManager.this.getAggregateScore(objId, transaction);

	// Recompute from scratch: the new aggregate is the highest score among
	// the analysis results that remain on the object.
	Score recomputedScore = Score.SCORE_UNKNOWN;
	for (AnalysisResult result : db.getBlackboard().getAnalysisResults(objId, connection)) {
		Score resultScore = result.getScore();
		if (Score.getScoreComparator().compare(resultScore, recomputedScore) > 0) {
			recomputedScore = resultScore;
		}
	}

	// Also factor in the score derived from the maximum known status of any
	// content tag on this content; it wins if it outranks the result-derived
	// score.
	Optional<Score> tagScore = db.getTaggingManager().getMaxTagKnownStatus(objId, transaction)
			.map(TaggingManager::getTagScore);
	if (tagScore.isPresent() && Score.getScoreComparator().compare(tagScore.get(), recomputedScore) > 0) {
		recomputedScore = tagScore.get();
	}

	// Only touch the DB (and fire a change event) if the aggregate changed.
	if (recomputedScore.compareTo(priorScore) != 0) {
		setAggregateScore(objId, dataSourceObjectId, recomputedScore, transaction);
		transaction.registerScoreChange(new ScoreChange(objId, dataSourceObjectId, priorScore, recomputedScore));
	}

	return recomputedScore;
}
/**
 * Counts the content objects within the specified data source that have the
 * specified aggregate significance.
 *
 * @param dataSourceObjectId Data source object id.
 * @param significance       Significance to look for.
 *
 * @return Number of content objects with the given score.
 *
 * @throws TskCoreException if there is an error getting the count.
 */
public long getContentCount(long dataSourceObjectId, Score.Significance significance) throws TskCoreException {
	db.acquireSingleUserCaseReadLock();
	try {
		// The connection is scoped to the query; the case read lock is
		// released regardless of how the query completes.
		try (CaseDbConnection connection = db.getConnection()) {
			return getContentCount(dataSourceObjectId, significance, connection);
		}
	} finally {
		db.releaseSingleUserCaseReadLock();
	}
}
/**
 * Counts the content objects within the specified data source that have the
 * specified aggregate significance, using the supplied database connection.
 *
 * @param dataSourceObjectId Data source object id.
 * @param significance       Significance to look for.
 * @param connection         Database connection to use.
 *
 * @return Number of content objects with the given score.
 *
 * @throws TskCoreException if there is an error getting the count.
 */
private long getContentCount(long dataSourceObjectId, Score.Significance significance, CaseDbConnection connection) throws TskCoreException {
	String sql = "SELECT COUNT(obj_id) AS count FROM tsk_aggregate_score"
			+ " WHERE data_source_obj_id = " + dataSourceObjectId
			+ " AND significance = " + significance.getId();
	try (Statement statement = connection.createStatement();
			ResultSet resultSet = connection.executeQuery(statement, sql);) {
		// A COUNT query yields a single row; default defensively to 0.
		return resultSet.next() ? resultSet.getLong("count") : 0;
	} catch (SQLException ex) {
		throw new TskCoreException("Error getting count of items with significance = " + significance.toString(), ex);
	}
}
/**
 * Gets the content objects within the specified data source that have the
 * specified aggregate significance.
 *
 * @param dataSourceObjectId Data source object id.
 * @param significance       Significance to look for.
 *
 * @return List of content objects with the given score.
 *
 * @throws TskCoreException if there is an error getting the contents.
 */
public List<Content> getContent(long dataSourceObjectId, Score.Significance significance) throws TskCoreException {
	db.acquireSingleUserCaseReadLock();
	try {
		// The connection is scoped to the query; the case read lock is
		// released regardless of how the query completes.
		try (CaseDbConnection connection = db.getConnection()) {
			return getContent(dataSourceObjectId, significance, connection);
		}
	} finally {
		db.releaseSingleUserCaseReadLock();
	}
}
/**
 * Gets the content objects within the specified data source that have the
 * specified aggregate significance, using the supplied database connection.
 *
 * @param dataSourceObjectId Data source object id.
 * @param significance       Significance to look for.
 * @param connection         Connection to use for the query.
 *
 * @return List of content objects with the given score.
 *
 * @throws TskCoreException
 */
private List<Content> getContent(long dataSourceObjectId, Score.Significance significance, CaseDbConnection connection) throws TskCoreException {
	String sql = "SELECT obj_id FROM tsk_aggregate_score"
			+ " WHERE data_source_obj_id = " + dataSourceObjectId
			+ " AND significance = " + significance.getId();
	try (Statement statement = connection.createStatement();
			ResultSet resultSet = connection.executeQuery(statement, sql);) {
		// Materialize each matching object id as a Content instance.
		List<Content> matches = new ArrayList<>();
		while (resultSet.next()) {
			matches.add(db.getContentById(resultSet.getLong("obj_id")));
		}
		return matches;
	} catch (SQLException ex) {
		throw new TskCoreException("Error getting list of items with significance = " + significance.toString(), ex);
	}
}
}
/*
* SleuthKit Java Bindings
*
* Copyright 2011-2022 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.util.Collections;
import org.sleuthkit.datamodel.TskData.FileKnown;
import org.sleuthkit.datamodel.TskData.TSK_FS_ATTR_TYPE_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_META_TYPE_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_TYPE_ENUM;
/**
 * A representation of a slack file that has been added to a case. A slack
 * file exposes the slack space (the unused bytes between the logical end of
 * a file and the end of its last allocated cluster) of a file system file.
 */
public class SlackFile extends FsContent {

	/**
	 * Constructs a representation of the slack space from a file system file
	 * that has been added to the case.
	 *
	 * @param db                 The case database to which the file has been
	 *                           added.
	 * @param objId              The object id of the file in the case database.
	 * @param dataSourceObjectId The object id of the data source for the file.
	 * @param fsObjId            The object id of the file system to which this
	 *                           file belongs.
	 * @param attrType           The type attribute given to the file by the
	 *                           file system.
	 * @param attrId             The type id given to the file by the file
	 *                           system.
	 * @param name               The name of the file.
	 * @param metaAddr           The meta address of the file.
	 * @param metaSeq            The meta sequence number of the file.
	 * @param dirType            The type of the base file, usually as reported
	 *                           in the name structure of the file system. May
	 *                           be set to TSK_FS_NAME_TYPE_ENUM.UNDEF.
	 * @param metaType           The type of the base file, usually as reported
	 *                           in the metadata structure of the file system.
	 *                           May be set to
	 *                           TSK_FS_META_TYPE_ENUM.TSK_FS_META_TYPE_UNDEF.
	 * @param dirFlag            The allocated status of the base file, usually
	 *                           as reported in the name structure of the file
	 *                           system.
	 * @param metaFlags          The allocated status of the base file, usually
	 *                           as reported in the metadata structure of the
	 *                           file system.
	 * @param size               The size of the file.
	 * @param ctime              The changed time of the file.
	 * @param crtime             The created time of the file.
	 * @param atime              The accessed time of the file.
	 * @param mtime              The modified time of the file.
	 * @param modes              The modes for the file.
	 * @param uid                The UID for the file.
	 * @param gid                The GID for the file.
	 * @param md5Hash            The MD5 hash of the file, null if not yet
	 *                           calculated.
	 * @param sha256Hash         sha256 hash of the file, or null if not present
	 * @param sha1Hash           SHA-1 hash of the file, or null if not present
	 * @param knownState         The known state of the file from a hash
	 *                           database lookup, null if not yet looked up.
	 * @param parentPath         The path of the parent of the file.
	 * @param mimeType           The MIME type of the file, null if it has not
	 *                           yet been determined.
	 * @param extension          The extension part of the file name (not
	 *                           including the '.'), can be null.
	 * @param ownerUid           UID of the file owner as found in the file
	 *                           system, can be null.
	 * @param osAccountObjId     Obj id of the owner OS account, may be null.
	 *
	 */
	SlackFile(SleuthkitCase db,
			long objId,
			long dataSourceObjectId,
			long fsObjId,
			TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
			String name,
			long metaAddr, int metaSeq,
			TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
			TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
			long size,
			long ctime, long crtime, long atime, long mtime,
			short modes, int uid, int gid,
			String md5Hash, String sha256Hash, String sha1Hash,
			FileKnown knownState, String parentPath, String mimeType,
			String extension,
			String ownerUid,
			Long osAccountObjId) {
		// Delegates to FsContent with the file type fixed to SLACK; slack
		// files never carry collected-status information or attributes here.
		super(db, objId, dataSourceObjectId, fsObjId, attrType, attrId, name, TskData.TSK_DB_FILES_TYPE_ENUM.SLACK, metaAddr, metaSeq, dirType, metaType, dirFlag, metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, extension, ownerUid, osAccountObjId, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
	}

	/**
	 * Reads bytes from the slack space
	 *
	 * @param buf    Buffer to read into.
	 * @param offset Start position in the slack space.
	 * @param len    Number of bytes to read.
	 *
	 * @return Number of bytes read.
	 *
	 * @throws TskCoreException if there is a problem reading the file.
	 */
	@Override
	@SuppressWarnings("deprecation")
	protected synchronized int readInt(byte[] buf, long offset, long len) throws TskCoreException {
		if (offset == 0 && size == 0) {
			//special case for 0-size file: nothing to read, avoid opening a handle
			return 0;
		}
		// Lazily opens the underlying JNI file handle on first read.
		loadFileHandle();
		return SleuthkitJNI.readFileSlack(fileHandle, buf, offset, len);
	}

	/**
	 * Accepts a Sleuthkit item visitor (Visitor design pattern).
	 *
	 * @param v A SleuthkitItemVisitor supplying an algorithm to run using this
	 *          file as input.
	 *
	 * @return The output of the algorithm.
	 */
	@Override
	public <T> T accept(SleuthkitItemVisitor<T> v) {
		return v.visit(this);
	}

	/**
	 * Accepts a content visitor (Visitor design pattern).
	 *
	 * @param v A ContentVisitor supplying an algorithm to run using this file
	 *          as input.
	 *
	 * @return The output of the algorithm.
	 */
	@Override
	public <T> T accept(ContentVisitor<T> v) {
		return v.visit(this);
	}

	/**
	 * Provides a string representation of this file.
	 *
	 * @param preserveState True if state should be included in the string
	 *                      representation of this object.
	 *
	 * @return A string representation of this file.
	 */
	@Override
	public String toString(boolean preserveState) {
		return super.toString(preserveState) + "SlackFile [\t" + "]\t"; //NON-NLS
	}
}
Source diff could not be displayed: it is too large. Options to address this: view the blob.
/*
* Sleuth Kit Data Model
*
* Copyright 2019 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
/**
 * Utility methods for administering a case database.
 */
public final class SleuthkitCaseAdminUtil {

	/**
	 * Private constructor: this class is a collection of static utility
	 * methods and is never instantiated.
	 */
	private SleuthkitCaseAdminUtil() {
	}

	/**
	 * Deletes a data source from a case database.
	 *
	 * @param caseDB          The case database.
	 * @param dataSourceObjID The object ID of the data source to be deleted.
	 *
	 * @throws TskCoreException If there is an error deleting the data source.
	 */
	public static void deleteDataSource(SleuthkitCase caseDB, long dataSourceObjID) throws TskCoreException {
		caseDB.deleteDataSource(dataSourceObjID);
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2011-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
/**
 * Interface for implementing a visitor pattern on all displayable items:
 * Content implementations and blackboard artifacts.
 *
 * Visitor implements an algorithm on the content and blackboard artifacts
 * objects. The algorithm is completely decoupled from the data object. The
 * visitor pattern emulates double dispatch mechanism. It allows to act
 * differently depending on the instance type, without need to test what the
 * actual type is. E.g. it allows for processing an object hierarchy without
 * using instanceof statements. Generic type parameter T is a return type from
 * the visit methods.
 *
 * @param <T> return type of visit methods
 */
public interface SleuthkitItemVisitor<T> {

	/**
	 * Act on (visit) a Directory content object
	 *
	 * @param d the directory to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(Directory d);

	/**
	 * Act on (visit) a File content object
	 *
	 * @param f the file to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(File f);

	/**
	 * Act on (visit) a FileSystem content object
	 *
	 * @param fs the filesystem to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(FileSystem fs);

	/**
	 * Act on (visit) an Image content object
	 *
	 * @param i the image to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(Image i);

	/**
	 * Act on (visit) a Volume content object
	 *
	 * @param v the volume to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(Volume v);

	/**
	 * Act on (visit) a VolumeSystem content object
	 *
	 * @param vs the volume system to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(VolumeSystem vs);

	/**
	 * Act on (visit) a Pool content object
	 *
	 * @param pool the pool to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(Pool pool);

	/**
	 * Act on (visit) a blackboard artifact object
	 *
	 * @param ba blackboard artifact object to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(BlackboardArtifact ba);

	/**
	 * Act on (visit) a blackboard artifact type
	 *
	 * @param tw blackboard artifact type to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(BlackboardArtifact.ARTIFACT_TYPE tw);

	/**
	 * Act on (visit) a layout file content object
	 *
	 * @param lf layout file to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(LayoutFile lf);

	/**
	 * Act on (visit) a VirtualDirectory content object
	 *
	 * @param ld virtual directory to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(VirtualDirectory ld);

	/**
	 * Act on (visit) a LocalDirectory content object
	 *
	 * @param ld local directory to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(LocalDirectory ld);

	/**
	 * Act on (visit) a DerivedFile content object
	 *
	 * @param df derived file to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(DerivedFile df);

	/**
	 * Act on (visit) a LocalFile content object
	 *
	 * @param lf local file to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(LocalFile lf);

	/**
	 * Act on (visit) a SlackFile content object
	 *
	 * @param sf slack file to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(SlackFile sf);

	/**
	 * Act on (visit) a Report content object
	 *
	 * @param report report to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(Report report);

	/**
	 * Act on (visit) a OsAccount content object
	 *
	 * @param account OS account to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(OsAccount account);

	/**
	 * Act on (visit) an UnsupportedContent object
	 *
	 * @param unsupportedContent content to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(UnsupportedContent unsupportedContent);

	/**
	 * Act on (visit) a LocalFilesDataSource content object
	 *
	 * @param localFilesDataSource local files data source to visit / act on
	 *
	 * @return result of the visit
	 */
	T visit(LocalFilesDataSource localFilesDataSource);

	/**
	 * The default visitor - quickest method for implementing a custom visitor.
	 * Every visit method delegates to the defaultVisit method, the only
	 * required method to be implemented. Then, implement the specific visit
	 * methods for the objects on which the algorithm needs to act differently.
	 *
	 * @param <T> generic type, signifies the object type to be returned from
	 *            visit()
	 */
	static abstract public class Default<T> implements SleuthkitItemVisitor<T> {

		protected abstract T defaultVisit(SleuthkitVisitableItem s);

		@Override
		public T visit(Directory d) {
			return defaultVisit(d);
		}

		@Override
		public T visit(File f) {
			return defaultVisit(f);
		}

		@Override
		public T visit(FileSystem fs) {
			return defaultVisit(fs);
		}

		@Override
		public T visit(Image i) {
			return defaultVisit(i);
		}

		@Override
		public T visit(Volume v) {
			return defaultVisit(v);
		}

		@Override
		public T visit(VolumeSystem vs) {
			return defaultVisit(vs);
		}

		@Override
		public T visit(Pool p) {
			return defaultVisit(p);
		}

		@Override
		public T visit(BlackboardArtifact ba) {
			return defaultVisit(ba);
		}

		@Override
		public T visit(BlackboardArtifact.ARTIFACT_TYPE tw) {
			return defaultVisit(tw);
		}

		@Override
		public T visit(LayoutFile lf) {
			return defaultVisit(lf);
		}

		@Override
		public T visit(VirtualDirectory vd) {
			return defaultVisit(vd);
		}

		@Override
		public T visit(LocalDirectory ld) {
			return defaultVisit(ld);
		}

		@Override
		public T visit(DerivedFile df) {
			return defaultVisit(df);
		}

		@Override
		public T visit(LocalFile lf) {
			return defaultVisit(lf);
		}

		@Override
		public T visit(SlackFile sf) {
			return defaultVisit(sf);
		}

		@Override
		public T visit(Report report) {
			return defaultVisit(report);
		}

		@Override
		public T visit(OsAccount account) {
			return defaultVisit(account);
		}

		@Override
		public T visit(UnsupportedContent unsupportedContent) {
			return defaultVisit(unsupportedContent);
		}

		@Override
		public T visit(LocalFilesDataSource localFilesDataSource) {
			return defaultVisit(localFilesDataSource);
		}
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2011-2018 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.lang3.StringUtils;
import org.sleuthkit.datamodel.TskData.TSK_FS_ATTR_TYPE_ENUM;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
/**
* A utility class that provides a interface to the SleuthKit via JNI. Supports
* case management, add image process, reading data off content objects Setting
* up Hash database parameters and updating / reading values
*
* Caches image and filesystem handles and reuses them for the duration of the
* application
*/
public class SleuthkitJNI {
private static final Logger logger = Logger.getLogger(SleuthkitJNI.class.getName());

/**
 * Lock to protect against the TSK data structures being closed while
 * another thread is in the C++ code. Do not use this lock after obtaining
 * HandleCache.cacheLock. Additionally, the only code that should acquire
 * the write lock is CaseDbHandle.free().
 */
private static final ReadWriteLock tskLock = new ReentrantReadWriteLock();

/*
 * Loads the SleuthKit native libraries when this class is first used.
 */
static {
	LibraryUtils.loadSleuthkitJNI();
}

/**
 * Private constructor to prevent instantiation of this utility class that
 * provides a interface to the SleuthKit via JNI.
 */
private SleuthkitJNI() {
}
/**
 * Utility class to hold the handles for a single case. All access to these
 * collections is guarded by HandleCache.cacheLock (see the synchronized
 * blocks in HandleCache).
 */
private static class CaseHandles {

	/*
	 * A SleuthKit image handle cache implemented as a mappng of
	 * concatenated image file paths to image handles.
	 */
	private final Map<String, Long> imageHandleCache = new HashMap<>();

	/*
	 * A SleuthKit file system handles cache implemented as a mapping of
	 * image handles to image offset and file system handle pairs.
	 */
	private final Map<Long, Map<Long, Long>> fsHandleCache = new HashMap<>();

	/*
	 * The collection of open file handles. We will only allow requests
	 * through to the C code if the file handle exists in this collection.
	 */
	private final Set<Long> fileHandleCache = new HashSet<>();

	// Maps each file system handle to the file handles opened within it, so
	// the files can be closed before the file system itself is closed.
	private final Map<Long, List<Long>> fileSystemToFileHandles = new HashMap<>();

	// Maps image handles to (offset -> pool handle) mappings.
	private final Map<Long, Map<Long, Long>> poolHandleCache = new HashMap<>();

	// The poolImgCache is only used to close the images later.
	private final List<Long> poolImgCache = new ArrayList<>();

	/*
	 * Currently, our APFS code is not thread-safe and it is the only code
	 * that uses pools. To prevent crashes, we make any reads to a file system
	 * contained in a pool single-threaded. This cache keeps track of which
	 * open file system handles are contained in a pool so we can set the locks
	 * appropriately.
	 */
	private final List<Long> poolFsList = new ArrayList<>();

	private CaseHandles() {
		// Nothing to do here
	}
}
/**
 * Cache of all handles allocated in the JNI layer. Used for: (a) quick
 * lookup of frequently used handles (e.g. file system and image) (b)
 * ensuring all handles passed in by clients of SleuthkitJNI are valid. (c)
 * consistent cleanup of handles on closure.
 */
private static class HandleCache {

	/*
	 * A monitor used to guard access to cached Sleuthkit JNI handles.
	 */
	private static final Object cacheLock = new Object();

	// Per-case handle collections, keyed by the case's unique identifier.
	private static final Map<String, CaseHandles> caseHandlesCache = new HashMap<>();

	private static final String INVALID_FILE_HANDLE = "Invalid file handle."; //NON-NLS

	/*
	 * Currently, our APFS code is not thread-safe and it is the only code
	 * that uses pools. To prevent crashes, we make any reads to a file system
	 * contained in a pool single-threaded. This cache keeps track of which
	 * open file handles are contained in a pool so we can set the locks
	 * appropriately.
	 *
	 * Access to this list should be guarded by cacheLock.
	 */
	private static final List<Long> poolFileHandles = new ArrayList<>();

	/**
	 * Create the empty cache for a new case
	 *
	 * @param caseIdentifier Unique identifier for the case.
	 */
	private static void createCaseHandleCache(String caseIdentifier) {
		caseHandlesCache.put(caseIdentifier, new CaseHandles());
	}

	/**
	 * If there is one case open return its unique identifier.
	 * This is to support deprecated methods that don't have a case parameter.
	 *
	 * @return the open case identifier
	 *
	 * @throws TskCoreException If there are no cases open or if multiple cases are open
	 */
	private static String getDefaultCaseIdentifier() throws TskCoreException {
		synchronized (cacheLock) {
			if (caseHandlesCache.size() > 1) {
				throw new TskCoreException("Can not get default case identifier with multiple open cases");
			} else if (caseHandlesCache.isEmpty()) {
				throw new TskCoreException("Can not get default case identifier with no open case");
			}

			return (caseHandlesCache.keySet().iterator().next());
		}
	}

	/**
	 * Gets the case handle cache for a given case.
	 *
	 * @param caseIdentifier Unique identifier for the case.
	 *
	 * @return the case handle cache
	 *
	 * @throws TskCoreException If there is no cache for this case.
	 */
	private static CaseHandles getCaseHandles(String caseIdentifier) throws TskCoreException {
		synchronized (cacheLock) {
			CaseHandles handles = caseHandlesCache.get(caseIdentifier);
			if (handles != null) {
				return handles;
			}
			// If the CaseHandles object isn't in there, it should mean the case has been closed.
			throw new TskCoreException("No entry for case " + caseIdentifier + " in cache. Case may have been closed");
		}
	}

	/**
	 * Removes the case handle cache for a given case. Does not close any
	 * handles; see closeHandlesAndClearCache() for that.
	 *
	 * @param caseIdentifier Unique identifier for the case.
	 */
	private static void removeCaseHandlesCache(String caseIdentifier) {
		synchronized (cacheLock) {
			CaseHandles handles = caseHandlesCache.remove(caseIdentifier);
			if (handles != null) {
				handles.fsHandleCache.clear();
				handles.imageHandleCache.clear();
				handles.fileHandleCache.clear();
				handles.fileSystemToFileHandles.clear();
				handles.poolHandleCache.clear();
			}
		}
	}

	/**
	 * Searches all the open caches for an image handle.
	 *
	 * @param imgHandle
	 *
	 * @return true if the handle is found in any cache, false otherwise
	 */
	private static boolean isImageInAnyCache(long imgHandle) {
		synchronized (cacheLock) {
			for (CaseHandles handles : caseHandlesCache.values()) {
				if (handles.fsHandleCache.containsKey(imgHandle)) {
					return true;
				}
			}
			return false;
		}
	}

	/**
	 * Add a new file handle to the cache.
	 *
	 * @param caseIdentifier Unique identifier for the case.
	 * @param fileHandle     The new file handle.
	 * @param fsHandle       The file system handle in which the file lives.
	 */
	private static void addFileHandle(String caseIdentifier, long fileHandle, long fsHandle) {
		try {
			synchronized (cacheLock) {
				CaseHandles handles = getCaseHandles(caseIdentifier);

				// Add to collection of open file handles.
				handles.fileHandleCache.add(fileHandle);

				// Add to map of file system to file handles.
				handles.fileSystemToFileHandles.computeIfAbsent(fsHandle, handle -> new ArrayList<>()).add(fileHandle);
			}
		} catch (TskCoreException ex) {
			logger.log(Level.WARNING, "Error caching file handle for case {0}", caseIdentifier);
		}
	}

	/**
	 * Removes a file handle from the cache for the given case
	 *
	 * @param fileHandle
	 * @param skCase     Can be null. If so, the first matching handle will be removed.
	 */
	private static void removeFileHandle(long fileHandle, SleuthkitCase skCase) {
		synchronized (cacheLock) {
			// Remove from collection of open file handles.
			if (skCase != null) {
				try {
					getCaseHandles(skCase.getCaseHandleIdentifier()).fileHandleCache.remove(fileHandle);
				} catch (TskCoreException ex) {
					// If the call to getCaseHandles() failed, we've already cleared the cache.
				}
			} else {
				// If we don't know what case the handle is from, delete the first one we find
				for (CaseHandles handles : caseHandlesCache.values()) {
					if (handles.fileHandleCache.remove(fileHandle)) {
						return;
					}
				}
			}
		}
	}

	/**
	 * Searches all the open caches for a file handle.
	 *
	 * @param fileHandle
	 *
	 * @return true if the handle is found in any cache, false otherwise
	 */
	private static boolean isValidFileHandle(long fileHandle) {
		synchronized (cacheLock) {
			for (CaseHandles handles : caseHandlesCache.values()) {
				if (handles.fileHandleCache.contains(fileHandle)) {
					return true;
				}
			}
			return false;
		}
	}

	/**
	 * Closes every file, file system, pool, and image handle cached for the
	 * given case, then removes the case's cache entry.
	 *
	 * @param caseIdentifier Unique identifier for the case.
	 *
	 * @throws TskCoreException If there is no cache for this case.
	 */
	private static void closeHandlesAndClearCache(String caseIdentifier) throws TskCoreException {
		synchronized (cacheLock) {
			CaseHandles handles = getCaseHandles(caseIdentifier);

			/*
			 * Close any cached file system handles.
			 */
			for (Map<Long, Long> imageToFsMap : handles.fsHandleCache.values()) {
				for (Long fsHandle : imageToFsMap.values()) {

					// First close all open file handles for the file system.
					List<Long> fileHandles = handles.fileSystemToFileHandles.get(fsHandle);
					if (fileHandles != null) {
						for (Long fileHandle : fileHandles) {
							// Update the cache of file handles contained in pools
							poolFileHandles.remove(fileHandle);
							closeFile(fileHandle);
						}
					}

					// Then close the file system handle.
					closeFsNat(fsHandle);
				}
			}

			/*
			 * Clear out the list of pool file systems.
			 */
			handles.poolFsList.clear();

			/*
			 * Close any cached pools
			 */
			for (Map<Long, Long> poolsByOffset : handles.poolHandleCache.values()) {
				for (Long poolHandle : poolsByOffset.values()) {
					closePoolNat(poolHandle);
				}
			}

			/*
			 * Close any open pool images
			 */
			for (Long imageHandle : handles.poolImgCache) {
				closeImgNat(imageHandle);
			}

			/*
			 * Close any cached image handles.
			 */
			for (Long imageHandle : handles.imageHandleCache.values()) {
				closeImgNat(imageHandle);
			}

			removeCaseHandlesCache(caseIdentifier);
		}
	}
}
/**
* Encapsulates a handle to a SleuthKit case database with support for
* adding images to the database.
*/
public static class CaseDbHandle {
/*
 * A unique identifier for a case. Also used as the key for the case's
 * entry in HandleCache.
 */
private final String caseDbIdentifier;
/**
 * Constructs an object that encapsulates a handle to a single user SleuthKit case
 * database with support for adding images to the database. Creates an empty
 * JNI handle cache entry for the case.
 *
 * @param databaseName A path to a case database
 */
private CaseDbHandle(String databaseName) {
	this.caseDbIdentifier = "SingleUser:" + databaseName; // NON-NLS
	HandleCache.createCaseHandleCache(caseDbIdentifier);
}
/**
 * Constructs an object that encapsulates a handle to a multi user SleuthKit case
 * database with support for adding images to the database. Creates an empty
 * JNI handle cache entry for the case.
 *
 * @param databaseName The name of the multi-user database.
 * @param info         Connection info for the multi-user database.
 */
private CaseDbHandle(String databaseName, CaseDbConnectionInfo info) {
	this.caseDbIdentifier = "MultiUser:" + info.getHost() + ":" + databaseName;
	HandleCache.createCaseHandleCache(caseDbIdentifier);
}
/**
 * Gets the unique identifier string for this case database (also the key
 * for the case's JNI handle cache entry).
 *
 * @return Unique identifier for the case.
 */
String getCaseDbIdentifier() {
	return caseDbIdentifier;
}
/**
 * Closes the case database and any open image and file system handles.
 *
 * @throws TskCoreException if there is a problem completing the
 *                          operation.
 */
void free() throws TskCoreException {
	// Take the tskLock write lock so that no other thread can be inside the
	// C++ layer while the cached handles are closed. This is the only place
	// the write lock is acquired (see the tskLock documentation).
	tskLock.writeLock().lock();
	try {
		HandleCache.closeHandlesAndClearCache(caseDbIdentifier);
		//SleuthkitJNI.closeCaseDbNat(caseDbIdentifier);
	} finally {
		tskLock.writeLock().unlock();
	}
}
/**
* Adds an image to the case database. For finer-grained control of the
* process of adding the image, call CaseDbHandle.initAddImageProcess
* instead.
*
* @param deviceObjId The object id of the device associated with
* the image.
* @param imageFilePaths The image file paths.
* @param timeZone The time zone for the image.
* @param addFileSystems Pass true to attempt to add file systems
* within the image to the case database.
* @param addUnallocSpace Pass true to create virtual files for
* unallocated space. Ignored if addFileSystems
* is false.
* @param skipFatFsOrphans Pass true to skip processing of orphan files
* for FAT file systems. Ignored if
* addFileSystems is false.
*
* @return The object id of the image.
*
* @throws TskCoreException if there is an error adding the image to
* case database.
*/
long addImageInfo(long deviceObjId, List<String> imageFilePaths, String timeZone, Host host, SleuthkitCase skCase) throws TskCoreException {
try {
if (host == null) {
String hostName;
if (imageFilePaths.size() > 0) {
String path = imageFilePaths.get(0);
hostName = (new java.io.File(path)).getName() + " Host";
} else {
hostName = "Image_" + deviceObjId + " Host";
}
host = skCase.getHostManager().newHost(hostName);
}
TskCaseDbBridge dbHelper = new TskCaseDbBridge(skCase, new DefaultAddDataSourceCallbacks(), host);
long tskAutoDbPointer = initializeAddImgNat(dbHelper, timezoneLongToShort(timeZone), false, false, false);
runOpenAndAddImgNat(tskAutoDbPointer, UUID.randomUUID().toString(), imageFilePaths.toArray(new String[0]), imageFilePaths.size(), timeZone);
long id = finishAddImgNat(tskAutoDbPointer);
dbHelper.finish();
skCase.addDataSourceToHasChildrenMap();
return id;
} catch (TskDataException ex) {
throw new TskCoreException("Error adding image to case database", ex);
}
}
/**
* Initializes a multi-step process for adding an image to the case
* database.
*
* @param timeZone The time zone of the image.
* @param addUnallocSpace Pass true to create virtual files for
* unallocated space.
* @param skipFatFsOrphans Pass true to skip processing of orphan files
* for FAT file systems.
* @param imageCopyPath Path to which a copy of the image should be
* written. Use the empty string to disable
* image writing.
*
* @return An object that can be used to exercise fine-grained control
* of the process of adding the image to the case database.
*/
AddImageProcess initAddImageProcess(String timeZone, boolean addUnallocSpace, boolean skipFatFsOrphans, String imageCopyPath, SleuthkitCase skCase) {
return new AddImageProcess(timeZone, addUnallocSpace, skipFatFsOrphans, imageCopyPath, skCase);
}
/**
* Encapsulates a multi-step process to add an image to the case
* database.
*/
public class AddImageProcess {
private final String timeZone;
private final boolean addUnallocSpace;
private final boolean skipFatFsOrphans;
private final String imageWriterPath;
private volatile long tskAutoDbPointer;
private long imageId = 0;
private boolean isCanceled;
private final SleuthkitCase skCase;
private TskCaseDbBridge dbHelper;
/**
* Constructs an object that encapsulates a multi-step process to
* add an image to the case database.
*
* @param timeZone The time zone of the image.
* @param addUnallocSpace Pass true to create virtual files for
* unallocated space.
* @param skipFatFsOrphans Pass true to skip processing of orphan
* files for FAT file systems.
* @param imageWriterPath Path that a copy of the image should be
* written to. Use empty string to disable
* image writing
*/
private AddImageProcess(String timeZone, boolean addUnallocSpace, boolean skipFatFsOrphans, String imageWriterPath, SleuthkitCase skCase) {
this.timeZone = timeZone;
this.addUnallocSpace = addUnallocSpace;
this.skipFatFsOrphans = skipFatFsOrphans;
this.imageWriterPath = imageWriterPath;
tskAutoDbPointer = 0;
this.isCanceled = false;
this.skCase = skCase;
}
/**
* Starts the process of adding an image to the case database.
*
* @param deviceId An ASCII-printable identifier for the
* device associated with the image that
* should be unique across multiple cases
* (e.g., a UUID).
* @param imageFilePaths Full path(s) to the image file(s).
* @param sectorSize The sector size (use '0' for autodetect).
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
* @throws TskDataException if a non-critical error occurs within
* the SleuthKit (should be OK to continue
* the process)
*/
public void run(String deviceId, String[] imageFilePaths, int sectorSize) throws TskCoreException, TskDataException {
Image img = addImageToDatabase(skCase, imageFilePaths, sectorSize, "", "", "", "", deviceId);
run(deviceId, img, sectorSize, new DefaultAddDataSourceCallbacks());
}
/**
* Starts the process of adding an image to the case database.
*
* @param deviceId An ASCII-printable identifier for the
* device associated with the image that
* should be unique across multiple cases
* (e.g., a UUID).
* @param image The image object (has already been added to the database)
* @param sectorSize The sector size (no longer used).
* @param addDataSourceCallbacks The callbacks to use to send data to ingest (may do nothing).
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
* @throws TskDataException if a non-critical error occurs within
* the SleuthKit (should be OK to continue
* the process)
*/
public void run(String deviceId, Image image, int sectorSize,
AddDataSourceCallbacks addDataSourceCallbacks) throws TskCoreException, TskDataException {
dbHelper = new TskCaseDbBridge(skCase, addDataSourceCallbacks, image.getHost());
getTSKReadLock();
try {
long imageHandle = 0;
synchronized (this) {
if (0 != tskAutoDbPointer) {
throw new TskCoreException("Add image process already started");
}
if (!isCanceled) { //with isCanceled being guarded by this it will have the same value everywhere in this synchronized block
imageHandle = image.getImageHandle();
tskAutoDbPointer = initAddImgNat(dbHelper, timezoneLongToShort(timeZone), addUnallocSpace, skipFatFsOrphans);
}
if (0 == tskAutoDbPointer) {
throw new TskCoreException("initAddImgNat returned a NULL TskAutoDb pointer");
}
}
if (imageHandle != 0) {
runAddImgNat(tskAutoDbPointer, deviceId, imageHandle, image.getId(), timeZone, imageWriterPath);
}
} finally {
finishAddImageProcess();
releaseTSKReadLock();
}
}
/**
* Stops the process of adding the image to the case database that
* was started by calling AddImageProcess.run.
* AddImageProcess.revert should be called after calling
* AddImageProcess.stop.
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
*/
public synchronized void stop() throws TskCoreException {
getTSKReadLock();
try {
isCanceled = true;
if (tskAutoDbPointer != 0) {
stopAddImgNat(tskAutoDbPointer);
}
} finally {
releaseTSKReadLock();
}
}
/**
* Call at the end of the add image process regardless of the error/canceled state.
*
* Note that the new image is no longer deleted on error/cancellation
*
* If the process was not canceled, will add the final batch of files to the database
* and submit for any further processing through the callback.
*
* @throws TskCoreException
*/
private synchronized void finishAddImageProcess() throws TskCoreException {
if (tskAutoDbPointer == 0) {
return;
}
// If the process wasn't cancelled, finish up processing the
// remaining files.
if (! this.isCanceled && dbHelper != null) {
dbHelper.finish();
}
// Free the auto DB pointer and get the image ID
imageId = finishAddImgNat(tskAutoDbPointer);
tskAutoDbPointer = 0;
skCase.addDataSourceToHasChildrenMap();
}
/**
* This no longer needs to be called.
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
*
* @deprecated No longer necessary
*/
@Deprecated
public synchronized void revert() throws TskCoreException {
// No-op
}
/**
* This no longer needs to be called. Will simply return the
* object ID of the new image.
*
* @return The object id of the image that was added.
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
*
* @deprecated No longer necessary
*/
@Deprecated
public synchronized long commit() throws TskCoreException {
return imageId;
}
/**
* Gets the file system directory currently being processed by the
* SleuthKit.
*
* @return The directory
*/
public synchronized String currentDirectory() {
return tskAutoDbPointer == 0 ? "" : getCurDirNat(tskAutoDbPointer); //NON-NLS
}
/**
* Starts the process of adding an image to the case database.
* Either commit() or revert() MUST be called after calling run().
*
* @param imageFilePaths Full path(s) to the image file(s).
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
* @throws TskDataException if a non-critical error occurs within
* the SleuthKit (should be OK to continue
* the process)
*
* @deprecated Use run(String dataSourceId, String[] imageFilePaths)
* instead
*/
@Deprecated
public void run(String[] imageFilePaths) throws TskCoreException, TskDataException {
run(null, imageFilePaths, 0);
}
/**
* Starts the process of adding an image to the case database.
*
* @param deviceId An ASCII-printable identifier for the
* device associated with the image that
* should be unique across multiple cases
* (e.g., a UUID).
* @param imageFilePaths Full path(s) to the image file(s).
*
* @throws TskCoreException if a critical error occurs within the
* SleuthKit.
* @throws TskDataException if a non-critical error occurs within
* the SleuthKit (should be OK to continue
* the process)
*/
public void run(String deviceId, String[] imageFilePaths) throws TskCoreException, TskDataException {
run(deviceId, imageFilePaths, 0);
}
}
}
/**
* Creates a new case database. Must call .free() on CaseDbHandle instance
* when done.
*
* @param path Location to create the database at.
*
* @return Handle for a new TskCaseDb instance.
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Creates a new single-user case database. Must call .free() on CaseDbHandle
 * instance when done.
 *
 * @param path Location to create the database at.
 *
 * @return Handle for a new TskCaseDb instance.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
static CaseDbHandle newCaseDb(String path) throws TskCoreException {
	return new CaseDbHandle(path);
}
/**
* Creates a new case database. Must call .free() on CaseDbHandle instance
* when done.
*
* @param databaseName the name of the database to create
* @param info the connection info class for the database to create
*
* @return Handle for a new TskCaseDb instance.
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Creates a new multi-user case database. Must call .free() on CaseDbHandle
 * instance when done.
 *
 * @param databaseName the name of the database to create
 * @param info         the connection info class for the database to create
 *
 * @return Handle for a new TskCaseDb instance.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
static CaseDbHandle newCaseDb(String databaseName, CaseDbConnectionInfo info) throws TskCoreException {
	return new CaseDbHandle(databaseName, info);
}
/**
* Opens an existing case database. Must call .free() on CaseDbHandle
* instance when done.
*
* @param path Location of the existing database.
*
* @return Handle for a new TskCaseDb instance.
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Opens an existing single-user case database. Must call .free() on
 * CaseDbHandle instance when done.
 *
 * @param path Location of the existing database.
 *
 * @return Handle for a new TskCaseDb instance.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
static CaseDbHandle openCaseDb(String path) throws TskCoreException {
	return new CaseDbHandle(path);
}
/**
* Opens an existing case database. Must call .free() on CaseDbHandle
* instance when done.
*
* @param databaseName the name of the database to open
* @param info the connection info class for the database to open
*
* @return Handle for a new TskCaseDb instance.
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Opens an existing multi-user case database. Must call .free() on
 * CaseDbHandle instance when done.
 *
 * @param databaseName the name of the database to open
 * @param info         the connection info class for the database to open
 *
 * @return Handle for a new TskCaseDb instance.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
static CaseDbHandle openCaseDb(String databaseName, CaseDbConnectionInfo info) throws TskCoreException {
	return new CaseDbHandle(databaseName, info);
}
/**
* get the Sleuth Kit version string
*
* @return the version string
*/
/**
 * Gets the Sleuth Kit version string from the native library.
 *
 * @return the version string
 */
public static String getVersion() {
	return getVersionNat();
}
/**
* Enable verbose logging and redirect stderr to the given log file.
*
* @param logPath the log file path
*/
/**
 * Enable verbose logging in the native library and redirect stderr to the
 * given log file.
 *
 * @param logPath the log file path
 */
public static void startVerboseLogging(String logPath) {
	startVerboseLoggingNat(logPath);
}
/**
* Open the image and return the image info pointer.
*
* @param imageFiles the paths to the images
* @param skCase the case this image belongs to
*
* @return the image info pointer
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Opens the image with sector-size autodetection and returns the image
 * info pointer. The handle is cached for the given case.
 *
 * @param imageFiles the paths to the images
 * @param skCase     the case this image belongs to; must not be null
 *
 * @return the image info pointer
 *
 * @throws TskCoreException exception thrown if skCase is null or a
 *                          critical error occurs within TSK
 */
public static long openImage(String[] imageFiles, SleuthkitCase skCase) throws TskCoreException {
	if (null == skCase) {
		throw new TskCoreException("SleuthkitCase can not be null");
	}
	final String caseIdentifier = skCase.getCaseHandleIdentifier();
	return openImage(imageFiles, 0, true, caseIdentifier);
}
/**
* Open the image with a specified sector size and return the image info
* pointer.
*
* @param imageFiles the paths to the images
* @param sSize the sector size (use '0' for autodetect)
* @param skCase the case this image belongs to
*
* @return the image info pointer
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Opens the image with the specified sector size and returns the image
 * info pointer. The handle is cached for the given case.
 *
 * @param imageFiles the paths to the images
 * @param sSize      the sector size (use '0' for autodetect)
 * @param skCase     the case this image belongs to; must not be null
 *
 * @return the image info pointer
 *
 * @throws TskCoreException exception thrown if skCase is null or a
 *                          critical error occurs within TSK
 */
public static long openImage(String[] imageFiles, int sSize, SleuthkitCase skCase) throws TskCoreException {
	if (null == skCase) {
		throw new TskCoreException("SleuthkitCase can not be null");
	}
	final String caseIdentifier = skCase.getCaseHandleIdentifier();
	return openImage(imageFiles, sSize, true, caseIdentifier);
}
/**
* Open the image and return the image info pointer. This is a temporary
* measure to allow ingest of multiple local disks on the same drive letter.
* We need to clear the cache to make sure cached data from the first drive
* is not used.
*
* @param imageFiles the paths to the images
* @param sSize the sector size (use '0' for autodetect)
* @param useCache true if the image handle cache should be used, false to
* always go to TSK to open a fresh copy
* @param caseIdentifer The caseDbIdentifier for this case. Can be null to support deprecated methods.
*
* @return the image info pointer
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Open the image and return the image info pointer. This is a temporary
 * measure to allow ingest of multiple local disks on the same drive letter.
 * We need to clear the cache to make sure cached data from the first drive
 * is not used.
 *
 * @param imageFiles   the paths to the images
 * @param sSize        the sector size (use '0' for autodetect)
 * @param useCache     true if the image handle cache should be used, false to
 *                     always go to TSK to open a fresh copy
 * @param caseIdentifer The caseDbIdentifier for this case. Can be null to
 *                      support deprecated methods; the default case
 *                      identifier is used in that case.
 *
 * @return the image info pointer
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
private static long openImage(String[] imageFiles, int sSize, boolean useCache, String caseIdentifer) throws TskCoreException {
	getTSKReadLock();
	try {
		long imageHandle;
		// The cache key is the concatenation of all image paths.
		StringBuilder keyBuilder = new StringBuilder();
		for (int i = 0; i < imageFiles.length; ++i) {
			keyBuilder.append(imageFiles[i]);
		}
		final String imageKey = keyBuilder.toString();
		synchronized (HandleCache.cacheLock) {
			String nonNullCaseIdentifer = caseIdentifer;
			if (nonNullCaseIdentifer == null) {
				nonNullCaseIdentifer = HandleCache.getDefaultCaseIdentifier();
			}
			// If we're getting a fresh copy and an image with this path is already
			// in the cache, move the existing cache reference so it won't be used by
			// any subsequent calls to openImage but will still be valid if any objects
			// have it cached. This happens in the case where the user adds the same data
			// source twice (see JIRA-5868).
			if (!useCache && HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.containsKey(imageKey)) {
				long tempImageHandle = HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.get(imageKey);
				// Store the old image handle in a fake path. This way it will no longer be found but will
				// still be valid and the image and its file systems will be closed with the case.
				String newPath = "Image_" + UUID.randomUUID().toString();
				HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.put(newPath, tempImageHandle);
				HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.remove(imageKey);
			}
			if (useCache && HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.containsKey(imageKey)) //get from cache
			{
				imageHandle = HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.get(imageKey);
			} else {
				//open new handle and cache it, and seed an empty file
				//system handle cache for the new image
				imageHandle = openImgNat(imageFiles, imageFiles.length, sSize);
				HandleCache.getCaseHandles(nonNullCaseIdentifer).fsHandleCache.put(imageHandle, new HashMap<>());
				HandleCache.getCaseHandles(nonNullCaseIdentifer).imageHandleCache.put(imageKey, imageHandle);
			}
		}
		return imageHandle;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* This is a temporary measure to support opening an image at the beginning
* of the add image process. The open image handle is put into the normal image cache so
* it won't be opened a second time and it will be closed during case closing.
*
* This will change when all image opens are done by object ID and not paths.
*
* @param skCase The case the image belongs to.
* @param imagePaths The complete list of paths for the image.
* @param imageHandle The open image handle from TSK.
*
* @throws TskCoreException If the new image could not be added to the cache
*/
/**
 * This is a temporary measure to support opening an image at the beginning
 * of the add image process. The open image handle is put into the normal image cache so
 * it won't be opened a second time and it will be closed during case closing.
 *
 * This will change when all image opens are done by object ID and not paths.
 *
 * @param skCase      The case the image belongs to.
 * @param imagePaths  The complete list of paths for the image.
 * @param imageHandle The open image handle from TSK.
 *
 * @throws TskCoreException If the new image could not be added to the cache
 */
private static void cacheImageHandle(SleuthkitCase skCase, List<String> imagePaths, long imageHandle) throws TskCoreException {
	// The cache key is the concatenation of every image path, in order.
	final String imageKey = String.join("", imagePaths);
	// Look up the handle cache for this case.
	final String caseIdentifier = skCase.getCaseHandleIdentifier();
	synchronized (HandleCache.cacheLock) {
		// Seed an empty file system handle cache for the image and
		// register the image handle itself under its path key.
		HandleCache.getCaseHandles(caseIdentifier).fsHandleCache.put(imageHandle, new HashMap<>());
		HandleCache.getCaseHandles(caseIdentifier).imageHandleCache.put(imageKey, imageHandle);
	}
}
/**
* Add an image to the database and return the open image.
*
* @param skCase The current case.
* @param imagePaths The path(s) to the image (will just be the first for .e01, .001, etc).
* @param sectorSize The sector size (0 for auto-detect).
* @param timeZone The time zone.
* @param md5fromSettings MD5 hash (if known).
* @param sha1fromSettings SHA1 hash (if known).
* @param sha256fromSettings SHA256 hash (if known).
* @param deviceId Device ID.
*
* @return The Image object.
*
* @throws TskCoreException
*/
/**
 * Add an image to the database and return the open image. Convenience
 * overload that delegates with a null host.
 *
 * @param skCase             The current case.
 * @param imagePaths         The path(s) to the image (will just be the first for .e01, .001, etc).
 * @param sectorSize         The sector size (0 for auto-detect).
 * @param timeZone           The time zone.
 * @param md5fromSettings    MD5 hash (if known).
 * @param sha1fromSettings   SHA1 hash (if known).
 * @param sha256fromSettings SHA256 hash (if known).
 * @param deviceId           Device ID.
 *
 * @return The Image object.
 *
 * @throws TskCoreException
 */
public static Image addImageToDatabase(SleuthkitCase skCase, String[] imagePaths, int sectorSize,
		String timeZone, String md5fromSettings, String sha1fromSettings, String sha256fromSettings, String deviceId) throws TskCoreException {
	return addImageToDatabase(skCase, imagePaths, sectorSize, timeZone, md5fromSettings, sha1fromSettings, sha256fromSettings, deviceId, null);
}
/**
* Add an image to the database and return the open image.
*
* @param skCase The current case.
* @param imagePaths The path(s) to the image (will just be the first for .e01, .001, etc).
* @param sectorSize The sector size (0 for auto-detect).
* @param timeZone The time zone.
* @param md5fromSettings MD5 hash (if known).
* @param sha1fromSettings SHA1 hash (if known).
* @param sha256fromSettings SHA256 hash (if known).
* @param deviceId Device ID.
* @param host Host.
*
* @return The Image object.
*
* @throws TskCoreException
*/
/**
 * Add an image to the database and return the open image.
 *
 * @param skCase             The current case.
 * @param imagePaths         The path(s) to the image (will just be the first for .e01, .001, etc).
 * @param sectorSize         The sector size (0 for auto-detect).
 * @param timeZone           The time zone.
 * @param md5fromSettings    MD5 hash (if known).
 * @param sha1fromSettings   SHA1 hash (if known).
 * @param sha256fromSettings SHA256 hash (if known).
 * @param deviceId           Device ID.
 * @param host               Host.
 *
 * @return The Image object.
 *
 * @throws TskCoreException
 */
public static Image addImageToDatabase(SleuthkitCase skCase, String[] imagePaths, int sectorSize,
		String timeZone, String md5fromSettings, String sha1fromSettings, String sha256fromSettings, String deviceId, Host host) throws TskCoreException {
	// Open the image. Only a count of 1 is passed; the complete list of
	// segment paths is re-read from the native layer below.
	long imageHandle = openImgNat(imagePaths, 1, sectorSize);
	// Get the fields stored in the native code
	List<String> computedPaths = Arrays.asList(getPathsForImageNat(imageHandle));
	long size = getSizeForImageNat(imageHandle);
	long type = getTypeForImageNat(imageHandle);
	long computedSectorSize = getSectorSizeForImageNat(imageHandle);
	// Prefer caller-supplied hashes; fall back to the values computed by
	// the native layer when the caller's value is empty.
	String md5 = md5fromSettings;
	if (StringUtils.isEmpty(md5)) {
		md5 = getMD5HashForImageNat(imageHandle);
	}
	String sha1 = sha1fromSettings;
	if (StringUtils.isEmpty(sha1)) {
		sha1 = getSha1HashForImageNat(imageHandle);
	}
	// Sleuthkit does not currently generate any SHA256 hashes. Set to empty
	// string for consistency.
	String sha256 = sha256fromSettings;
	if (sha256 == null) {
		sha256 = "";
	}
	String collectionDetails = getCollectionDetailsForImageNat(imageHandle);
	// Now save to database; roll back the transaction on any failure.
	CaseDbTransaction transaction = skCase.beginTransaction();
	try {
		Image img = skCase.addImage(TskData.TSK_IMG_TYPE_ENUM.valueOf(type), computedSectorSize,
				size, null, computedPaths,
				timeZone, md5, sha1, sha256,
				deviceId, host, transaction);
		if (!StringUtils.isEmpty(collectionDetails)) {
			skCase.setAcquisitionDetails(img, collectionDetails);
		}
		transaction.commit();
		// Attach the already-open native handle and register it in the
		// cache so it is reused and closed with the case.
		img.setImageHandle(imageHandle);
		cacheImageHandle(skCase, computedPaths, imageHandle);
		return img;
	} catch (TskCoreException ex) {
		transaction.rollback();
		throw(ex);
	}
}
/**
* Get volume system Handle
*
* @param imgHandle a handle to previously opened image
* @param vsOffset byte offset in the image to the volume system (usually
* 0)
*
* @return pointer to a vsHandle structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Gets a volume system handle for an open image.
 *
 * @param imgHandle a handle to previously opened image
 * @param vsOffset  byte offset in the image to the volume system (usually
 *                  0)
 *
 * @return pointer to a vsHandle structure in the sleuthkit
 *
 * @throws TskCoreException if the image handle has been closed or a
 *                          critical error occurs within TSK
 */
public static long openVs(long imgHandle, long vsOffset) throws TskCoreException {
	getTSKReadLock();
	try {
		if (imgHandleIsValid(imgHandle)) {
			return openVsNat(imgHandle, vsOffset);
		}
		throw new TskCoreException("Image handle " + imgHandle + " is closed");
	} finally {
		releaseTSKReadLock();
	}
}
//get pointers
/**
* Get volume Handle
*
* @param vsHandle pointer to the volume system structure in the sleuthkit
* @param volId id of the volume
*
* @return pointer to a volHandle structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Gets a volume handle from an open volume system.
 *
 * @param vsHandle pointer to the volume system structure in the sleuthkit
 * @param volId    id of the volume
 *
 * @return pointer to a volHandle structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
public static long openVsPart(long vsHandle, long volId) throws TskCoreException {
	getTSKReadLock();
	try {
		// The returned long is a pointer to a volume handle object in TSK.
		final long volHandle = openVolNat(vsHandle, volId);
		return volHandle;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* Get pool Handle
*
* @param imgHandle pointer to the image structure in the sleuthkit
* @param offset offset of the pool
*
* @return pointer to a pool info structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Get pool Handle. Opened handles are cached per image, keyed by offset.
 *
 * @param imgHandle pointer to the image structure in the sleuthkit
 * @param offset    offset of the pool
 * @param skCase    the case containing the pool; if null the default case
 *                  identifier is used
 *
 * @return pointer to a pool info structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if the image handle is closed
 *                          or critical error occurs within TSK
 */
static long openPool(long imgHandle, long offset, SleuthkitCase skCase) throws TskCoreException {
	getTSKReadLock();
	try {
		if(! imgHandleIsValid(imgHandle)) {
			throw new TskCoreException("Image handle " + imgHandle + " is closed");
		}
		synchronized (HandleCache.cacheLock) {
			String caseIdentifier;
			if (skCase == null) {
				caseIdentifier = HandleCache.getDefaultCaseIdentifier();
			} else {
				caseIdentifier = skCase.getCaseHandleIdentifier();
			}
			// If a pool handle cache for this image does not exist, make one
			if (! HandleCache.getCaseHandles(caseIdentifier).poolHandleCache.containsKey(imgHandle)) {
				HandleCache.getCaseHandles(caseIdentifier).poolHandleCache.put(imgHandle, new HashMap<>());
			}
			// Get the pool handle cache for this image
			Map<Long, Long> poolCacheForImage = HandleCache.getCaseHandles(caseIdentifier).poolHandleCache.get(imgHandle);
			if (poolCacheForImage.containsKey(offset)) {
				return poolCacheForImage.get(offset);
			} else {
				//returned long is ptr to pool Handle object in tsk;
				//cache it by offset before returning
				long poolHandle = openPoolNat(imgHandle, offset);
				poolCacheForImage.put(offset, poolHandle);
				return poolHandle;
			}
		}
	} finally {
		releaseTSKReadLock();
	}
}
/**
* Get file system Handle Opened handle is cached (transparently) so it does
* not need be reopened next time for the duration of the application
*
* @param imgHandle pointer to imgHandle in sleuthkit
* @param fsOffset byte offset to the file system
* @param skCase the case containing the file system
*
* @return pointer to a fsHandle structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Get file system Handle Opened handle is cached (transparently) so it does
 * not need be reopened next time for the duration of the application
 *
 * @param imgHandle pointer to imgHandle in sleuthkit
 * @param fsOffset  byte offset to the file system
 * @param skCase    the case containing the file system; if null the
 *                  default case identifier is used
 *
 * @return pointer to a fsHandle structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if the image has no handle
 *                          cache entry or critical error occurs within TSK
 */
public static long openFs(long imgHandle, long fsOffset, SleuthkitCase skCase) throws TskCoreException {
	getTSKReadLock();
	try {
		long fsHandle;
		synchronized (HandleCache.cacheLock) {
			String caseIdentifier;
			if (skCase == null) {
				caseIdentifier = HandleCache.getDefaultCaseIdentifier();
			} else {
				caseIdentifier = skCase.getCaseHandleIdentifier();
			}
			// Per-image cache mapping file system offset -> fs handle.
			// Created when the image itself was opened/cached.
			final Map<Long, Long> imgOffSetToFsHandle = HandleCache.getCaseHandles(caseIdentifier).fsHandleCache.get(imgHandle);
			if (imgOffSetToFsHandle == null) {
				throw new TskCoreException("Missing image offset to file system handle cache for image handle " + imgHandle);
			}
			if (imgOffSetToFsHandle.containsKey(fsOffset)) {
				//return cached
				fsHandle = imgOffSetToFsHandle.get(fsOffset);
			} else {
				fsHandle = openFsNat(imgHandle, fsOffset);
				//cache it
				imgOffSetToFsHandle.put(fsOffset, fsHandle);
			}
		}
		return fsHandle;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* Get file system handle for a file system contained in a pool.
* Opened handle is cached (transparently) so it does
* not need be reopened next time for the duration of the application
*
* @param imgHandle pointer to imgHandle in sleuthkit
* @param fsOffset byte offset to the file system
* @param poolHandle pointer to the pool info handle
* @param poolBlock pool block
* @param skCase the case containing the file system
*
* @return pointer to a fsHandle structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Get file system handle for a file system contained in a pool.
 * Opened handle is cached (transparently) so it does
 * not need be reopened next time for the duration of the application.
 * Note: the cache is keyed by poolBlock (not fsOffset) for pool file
 * systems.
 *
 * @param imgHandle  pointer to imgHandle in sleuthkit
 * @param fsOffset   byte offset to the file system
 * @param poolHandle pointer to the pool info handle
 * @param poolBlock  pool block
 * @param skCase     the case containing the file system; if null the
 *                   default case identifier is used
 *
 * @return pointer to a fsHandle structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
static long openFsPool(long imgHandle, long fsOffset, long poolHandle, long poolBlock, SleuthkitCase skCase) throws TskCoreException {
	/*
	 * Currently, our APFS code is not thread-safe and it is the only code
	 * that uses pools. To prevent crashes, we make any reads to a file system
	 * contained in a pool single-threaded.
	 */
	getTSKWriteLock();
	try {
		long fsHandle;
		synchronized (HandleCache.cacheLock) {
			String caseIdentifier;
			if (skCase == null) {
				caseIdentifier = HandleCache.getDefaultCaseIdentifier();
			} else {
				caseIdentifier = skCase.getCaseHandleIdentifier();
			}
			final Map<Long, Long> imgOffSetToFsHandle = HandleCache.getCaseHandles(caseIdentifier).fsHandleCache.get(imgHandle);
			if (imgOffSetToFsHandle == null) {
				throw new TskCoreException("Missing image offset to file system handle cache for image handle " + imgHandle);
			}
			if (imgOffSetToFsHandle.containsKey(poolBlock)) {
				//return cached
				fsHandle = imgOffSetToFsHandle.get(poolBlock);
			} else {
				// Open an image layer for the pool block, track it so it
				// is closed with the case, then open the fs within it.
				long poolImgHandle = getImgInfoForPoolNat(poolHandle, poolBlock);
				HandleCache.getCaseHandles(caseIdentifier).poolImgCache.add(poolImgHandle);
				fsHandle = openFsNat(poolImgHandle, fsOffset);
				//cache it, and record the fs as pool-backed so later reads
				//know to take the write lock
				imgOffSetToFsHandle.put(poolBlock, fsHandle);
				HandleCache.getCaseHandles(caseIdentifier).poolFsList.add(fsHandle);
			}
		}
		return fsHandle;
	} finally {
		releaseTSKWriteLock();
	}
}
/**
* Get file Handle
*
* @param fsHandle fsHandle pointer in the sleuthkit
* @param fileId id of the file
* @param attrType file attribute type to open
* @param attrId file attribute id to open
* @param skCase the case associated with this file
*
* @return pointer to a file structure in the sleuthkit
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Get file Handle. The opened handle is registered in the handle cache so
 * it can be validated and closed with the case.
 *
 * @param fsHandle fsHandle pointer in the sleuthkit
 * @param fileId   id of the file
 * @param attrType file attribute type to open
 * @param attrId   file attribute id to open
 * @param skCase   the case associated with this file; if null the default
 *                 case identifier is used
 *
 * @return pointer to a file structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
public static long openFile(long fsHandle, long fileId, TSK_FS_ATTR_TYPE_ENUM attrType, int attrId, SleuthkitCase skCase) throws TskCoreException {
	/*
	 * NOTE: previously attrId used to be stored in AbstractFile as (signed)
	 * short even though it is stored as uint16 in TSK. In extremely rare
	 * occurrences attrId can be larger than what a signed short can hold
	 * (2^15). Changes were made to AbstractFile to store attrId as integer.
	 * However, a depricated method still exists in AbstractFile to get
	 * attrId as short. In that method we convert attribute ids that are
	 * larger than 32K to a negative number. Therefore if encountered, we
	 * need to convert negative attribute id to uint16 which is what TSK is
	 * using to store attribute id.
	 */
	// Determine (under the cache lock) whether this file system lives in a
	// pool; that decides which TSK lock must be taken below.
	boolean withinPool = false;
	synchronized (HandleCache.cacheLock) {
		String caseIdentifier;
		if (skCase == null) {
			caseIdentifier = HandleCache.getDefaultCaseIdentifier();
		} else {
			caseIdentifier = skCase.getCaseHandleIdentifier();
		}
		if (HandleCache.getCaseHandles(caseIdentifier).poolFsList.contains(fsHandle)) {
			withinPool = true;
		}
	}
	/*
	 * The current APFS code is not thread-safe. To compensate, we make any
	 * reads to the APFS pool single-threaded by obtaining a write
	 * lock instead of a read lock.
	 */
	if (withinPool) {
		getTSKWriteLock();
	} else {
		getTSKReadLock();
	}
	try {
		long fileHandle = openFileNat(fsHandle, fileId, attrType.getValue(), convertSignedToUnsigned(attrId));
		synchronized (HandleCache.cacheLock) {
			String caseIdentifier;
			if (skCase == null) {
				caseIdentifier = HandleCache.getDefaultCaseIdentifier();
			} else {
				caseIdentifier = skCase.getCaseHandleIdentifier();
			}
			HandleCache.addFileHandle(caseIdentifier, fileHandle, fsHandle);
			// If this file is in a pool file system, record it so the locks
			// can be set appropriately when reading it.
			if (withinPool) {
				HandleCache.poolFileHandles.add(fileHandle);
			}
		}
		return fileHandle;
	} finally {
		if (withinPool) {
			releaseTSKWriteLock();
		} else {
			releaseTSKReadLock();
		}
	}
}
/**
* Converts signed integer to an unsigned integer.
*
* @param val value to be converter
*
* @return unsigned integer value
*/
/**
 * Converts a signed integer to its unsigned (uint16) interpretation.
 * Non-negative values pass through unchanged; negative values are masked
 * back to the uint16 they originally encoded.
 *
 * @param val value to be converted
 *
 * @return unsigned integer value
 */
private static int convertSignedToUnsigned(int val) {
	return (val >= 0) ? val : (val & 0xffff);
}
/**
* Test that the given image handle is valid.
* @param imgHandle
* @return true if it is valid, false otherwise
*/
/**
 * Test that the given image handle is valid, i.e. present in some case's
 * handle cache.
 *
 * @param imgHandle the image handle to check
 *
 * @return true if it is valid, false otherwise
 */
private static boolean imgHandleIsValid(long imgHandle) {
	final boolean isCached;
	synchronized (HandleCache.cacheLock) {
		isCached = HandleCache.isImageInAnyCache(imgHandle);
	}
	return isCached;
}
//do reads
/**
* reads data from an image
*
* @param imgHandle
* @param readBuffer buffer to read to
* @param offset byte offset in the image to start at
* @param len amount of data to read
*
* @return the number of characters read, or -1 if the end of the stream has
* been reached
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Reads data from an image into the supplied buffer.
 *
 * @param imgHandle  handle to a previously opened image
 * @param readBuffer buffer to read to
 * @param offset     byte offset in the image to start at
 * @param len        amount of data to read
 *
 * @return the number of characters read, or -1 if the end of the stream has
 *         been reached
 *
 * @throws TskCoreException if the image handle has been closed or a
 *                          critical error occurs within TSK
 */
public static int readImg(long imgHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
	getTSKReadLock();
	try {
		if (imgHandleIsValid(imgHandle)) {
			// The supplied byte[] is filled by the native call.
			return readImgNat(imgHandle, readBuffer, offset, len);
		}
		throw new TskCoreException("Image handle " + imgHandle + " is closed");
	} finally {
		releaseTSKReadLock();
	}
}
/**
* reads data from an volume system
*
* @param vsHandle pointer to a volume system structure in the sleuthkit
* @param readBuffer buffer to read to
* @param offset sector offset in the image to start at
* @param len amount of data to read
*
* @return the number of characters read, or -1 if the end of the stream has
* been reached
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Reads data from a volume system into the supplied buffer.
 *
 * @param vsHandle   pointer to a volume system structure in the sleuthkit
 * @param readBuffer buffer to read to
 * @param offset     sector offset in the image to start at
 * @param len        amount of data to read
 *
 * @return the number of characters read, or -1 if the end of the stream has
 *         been reached
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
public static int readVs(long vsHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
	getTSKReadLock();
	try {
		final int bytesRead = readVsNat(vsHandle, readBuffer, offset, len);
		return bytesRead;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* Reads data from a pool
*
* @param poolHandle handle to the pool info struct
* @param readBuffer buffer to read into
* @param offset starting offset
* @param len length
*
* @return number of bytes read
*
* @throws TskCoreException
*/
/**
 * Reads data from a pool into the supplied buffer.
 *
 * @param poolHandle handle to the pool info struct
 * @param readBuffer buffer to read into
 * @param offset     starting offset
 * @param len        length
 *
 * @return number of bytes read
 *
 * @throws TskCoreException
 */
static int readPool(long poolHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
	getTSKReadLock();
	try {
		final int bytesRead = readPoolNat(poolHandle, readBuffer, offset, len);
		return bytesRead;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* reads data from an volume
*
* @param volHandle pointer to a volume structure in the sleuthkit
* @param readBuffer buffer to read to
* @param offset byte offset in the image to start at
* @param len amount of data to read
*
* @return the number of characters read, or -1 if the end of the stream has
* been reached
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Reads data from a volume into the supplied buffer.
 *
 * @param volHandle  pointer to a volume structure in the sleuthkit
 * @param readBuffer buffer to read to
 * @param offset     byte offset in the image to start at
 * @param len        amount of data to read
 *
 * @return the number of characters read, or -1 if the end of the stream has
 *         been reached
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
public static int readVsPart(long volHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
	getTSKReadLock();
	try {
		// The supplied byte[] is filled by the native call.
		final int bytesRead = readVolNat(volHandle, readBuffer, offset, len);
		return bytesRead;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* reads data from an file system
*
* @param fsHandle pointer to a file system structure in the sleuthkit
* @param readBuffer buffer to read to
* @param offset byte offset in the image to start at
* @param len amount of data to read
*
* @return the number of characters read, or -1 if the end of the stream has
* been reached
*
* @throws TskCoreException exception thrown if critical error occurs within
* TSK
*/
/**
 * Reads data from a file system into the supplied buffer.
 *
 * @param fsHandle   pointer to a file system structure in the sleuthkit
 * @param readBuffer buffer to read to
 * @param offset     byte offset in the image to start at
 * @param len        amount of data to read
 *
 * @return the number of characters read, or -1 if the end of the stream has
 *         been reached
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 *                          TSK
 */
public static int readFs(long fsHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
	getTSKReadLock();
	try {
		// The supplied byte[] is filled by the native call.
		final int bytesRead = readFsNat(fsHandle, readBuffer, offset, len);
		return bytesRead;
	} finally {
		releaseTSKReadLock();
	}
}
/**
* enum used to tell readFileNat whether the offset is from the beginning of
* the file or from the beginning of the slack space.
*/
private enum TSK_FS_FILE_READ_OFFSET_TYPE_ENUM {
START_OF_FILE(0),
START_OF_SLACK(1);
private final int val;
TSK_FS_FILE_READ_OFFSET_TYPE_ENUM(int val) {
this.val = val;
}
int getValue() {
return val;
}
}
/**
 * Reads data from a file.
 *
 * @param fileHandle pointer to a file structure in the sleuthkit
 * @param readBuffer pre-allocated buffer to read to
 * @param offset byte offset in the file to start at
 * @param len amount of data to read
 *
 * @return the number of bytes read, or -1 if the end of the stream has
 * been reached
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 */
public static int readFile(long fileHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
// Determine, under the cache lock, whether this handle belongs to a pool.
// NOTE(review): the pool-membership check and the subsequent lock
// acquisition are not atomic; this assumes the handle's pool membership
// cannot change in between - confirm against HandleCache usage.
boolean withinPool = false;
synchronized (HandleCache.cacheLock) {
if (HandleCache.poolFileHandles.contains(fileHandle)) {
withinPool = true;
}
}
/*
 * The current APFS code is not thread-safe. To compensate, we make any
 * reads to the APFS pool single-threaded by obtaining a write
 * lock instead of a read lock.
 */
if (withinPool) {
getTSKWriteLock();
} else {
getTSKReadLock();
}
try {
// Re-validate the handle after acquiring the lock; it may have been
// closed by another thread before we got here.
if (!HandleCache.isValidFileHandle(fileHandle)) {
throw new TskCoreException(HandleCache.INVALID_FILE_HANDLE);
}
return readFileNat(fileHandle, readBuffer, offset, TSK_FS_FILE_READ_OFFSET_TYPE_ENUM.START_OF_FILE.getValue(), len);
} finally {
// Release the same lock type that was acquired above.
if (withinPool) {
releaseTSKWriteLock();
} else {
releaseTSKReadLock();
}
}
}
/**
 * Reads data from the slack space of a file.
 *
 * @param fileHandle pointer to a file structure in the sleuthkit
 * @param readBuffer pre-allocated buffer to read to
 * @param offset byte offset in the slack to start at
 * @param len amount of data to read
 *
 * @return the number of bytes read, or -1 if the end of the stream has
 * been reached
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 */
public static int readFileSlack(long fileHandle, byte[] readBuffer, long offset, long len) throws TskCoreException {
getTSKReadLock();
try {
if (!HandleCache.isValidFileHandle(fileHandle)) {
throw new TskCoreException(HandleCache.INVALID_FILE_HANDLE);
}
// START_OF_SLACK makes the native layer interpret 'offset' as relative
// to the beginning of the slack space rather than the file content.
return readFileNat(fileHandle, readBuffer, offset, TSK_FS_FILE_READ_OFFSET_TYPE_ENUM.START_OF_SLACK.getValue(), len);
} finally {
releaseTSKReadLock();
}
}
/**
 * Get human readable (somewhat) details about a file. This is the same as
 * the 'istat' TSK tool. The native layer writes the text to a temporary
 * file, which is then read back line by line and deleted.
 *
 * @param fileHandle pointer to file structure in the sleuthkit
 *
 * @return the lines of metadata text
 *
 * @throws TskCoreException if the file handle is invalid or an I/O error
 *                          occurs while reading the istat output
 */
public static List<String> getFileMetaDataText(long fileHandle) throws TskCoreException {
	getTSKReadLock();
	try {
		if (!HandleCache.isValidFileHandle(fileHandle)) {
			throw new TskCoreException(HandleCache.INVALID_FILE_HANDLE);
		}
		try {
			java.io.File tmp = java.io.File.createTempFile("tsk", ".txt");
			try {
				saveFileMetaDataTextNat(fileHandle, tmp.getAbsolutePath());
				// try-with-resources guarantees the reader is closed even if
				// readLine() throws; the previous version leaked both the
				// FileReader and BufferedReader on error.
				List<String> lines = new ArrayList<String>();
				try (BufferedReader textReader = new BufferedReader(new FileReader(tmp.getAbsolutePath()))) {
					String line;
					while ((line = textReader.readLine()) != null) {
						lines.add(line);
					}
				}
				return lines;
			} finally {
				// Always remove the temp file, even when reading fails.
				tmp.delete();
			}
		} catch (IOException ex) {
			// Preserve the original exception as the cause for diagnostics.
			throw new TskCoreException("Error reading istat output: " + ex.getLocalizedMessage(), ex);
		}
	} finally {
		releaseTSKReadLock();
	}
}
/**
 * Frees the fileHandle pointer. Convenience overload that does not remove
 * case-specific cache entries (delegates with a null case).
 *
 * @param fileHandle pointer to file structure in sleuthkit
 */
public static void closeFile(long fileHandle) {
closeFile(fileHandle, null);
}
/**
 * Frees the fileHandle pointer and removes the handle from the cache.
 *
 * @param fileHandle pointer to file structure in sleuthkit
 * @param skCase     the case containing the file (may be null)
 */
public static void closeFile(long fileHandle, SleuthkitCase skCase) {
	// Determine, under the cache lock, whether this handle belongs to a pool.
	boolean withinPool = false;
	synchronized (HandleCache.cacheLock) {
		if (HandleCache.poolFileHandles.contains(fileHandle)) {
			withinPool = true;
		}
	}
	/*
	 * The current APFS code is not thread-safe. To compensate, we make any
	 * reads to the APFS pool single-threaded by obtaining a write
	 * lock instead of a read lock.
	 */
	if (withinPool) {
		getTSKWriteLock();
	} else {
		getTSKReadLock();
	}
	try {
		synchronized (HandleCache.cacheLock) {
			if (!HandleCache.isValidFileHandle(fileHandle)) {
				// File handle is not open so this is a no-op.
				return;
			}
			closeFileNat(fileHandle);
			HandleCache.removeFileHandle(fileHandle, skCase);
			// remove() is already a no-op when the handle is absent, so the
			// previous contains() pre-check was redundant.
			HandleCache.poolFileHandles.remove(fileHandle);
		}
	} finally {
		// Release the same lock type that was acquired above.
		if (withinPool) {
			releaseTSKWriteLock();
		} else {
			releaseTSKReadLock();
		}
	}
}
/**
 * Create an index for a hash database.
 *
 * @param dbHandle A hash database handle.
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static void createLookupIndexForHashDatabase(int dbHandle) throws TskCoreException {
hashDbCreateIndexNat(dbHandle);
}
/**
 * Check if an index exists for a hash database.
 *
 * @param dbHandle A hash database handle.
 *
 * @return true if index exists
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static boolean hashDatabaseHasLookupIndex(int dbHandle) throws TskCoreException {
return hashDbIndexExistsNat(dbHandle);
}
/**
 * Checks whether a hash database can be re-indexed.
 *
 * @param dbHandle previously opened hash db handle
 *
 * @return Does this database have a source database that is different than
 * the index?
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static boolean hashDatabaseCanBeReindexed(int dbHandle) throws TskCoreException {
return hashDbIsReindexableNat(dbHandle);
}
/**
 * Gets the file path of a hash database.
 *
 * @param dbHandle previously opened hash db handle
 *
 * @return Hash db file path
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static String getHashDatabasePath(int dbHandle) throws TskCoreException {
return hashDbPathNat(dbHandle);
}
/**
 * Gets the index file path of a hash database.
 *
 * @param dbHandle previously opened hash db handle
 *
 * @return Index file path
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static String getHashDatabaseIndexPath(int dbHandle) throws TskCoreException {
return hashDbIndexPathNat(dbHandle);
}
/**
 * Open a hash database for lookups.
 *
 * @param path Path to Hash DB or index file
 * @return Handle of open db
 * @throws TskCoreException if there is an error opening the DB
 */
public static int openHashDatabase(String path) throws TskCoreException {
return hashDbOpenNat(path);
}
/**
 * Creates a hash database. Will be of the default TSK hash database type.
 *
 * @param path The path to the database
 *
 * @return a handle for that database
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static int createHashDatabase(String path) throws TskCoreException {
return hashDbNewNat(path);
}
/**
 * Close the currently open lookup databases. Resets the handle counting.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 */
public static void closeAllHashDatabases() throws TskCoreException {
hashDbCloseAll();
}
/**
 * Close a particular open lookup database. Existing handles are not
 * affected.
 *
 * @param dbHandle Handle of database to close.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 */
public static void closeHashDatabase(int dbHandle) throws TskCoreException {
hashDbClose(dbHandle);
}
/**
 * Get the name of the database
 *
 * @param dbHandle Previously opened hash db handle.
 *
 * @return The display name.
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static String getHashDatabaseDisplayName(int dbHandle) throws TskCoreException {
return hashDbGetDisplayName(dbHandle);
}
/**
 * Lookup the given hash value and get basic answer
 *
 * @param hash Hash value to search for.
 * @param dbHandle Handle of database to lookup in.
 *
 * @return True if hash was found in database.
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static boolean lookupInHashDatabase(String hash, int dbHandle) throws TskCoreException {
return hashDbLookup(hash, dbHandle);
}
/**
 * Lookup hash value in DB and return details on results (more time
 * consuming than basic lookup)
 *
 * @param hash Hash value to search for
 * @param dbHandle Handle of database to lookup in.
 *
 * @return Details on hash if it was in DB or null if it was not found.
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static HashHitInfo lookupInHashDatabaseVerbose(String hash, int dbHandle) throws TskCoreException {
return hashDbLookupVerbose(hash, dbHandle);
}
/**
 * Adds a hash value to a hash database.
 *
 * @param filename Name of file (can be null)
 * @param md5 Text of MD5 hash (can be null)
 * @param sha1 Text of SHA1 hash (can be null)
 * @param sha256 Text of SHA256 hash (can be null)
 * @param comment A comment (can be null)
 * @param dbHandle Handle to DB
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static void addToHashDatabase(String filename, String md5, String sha1, String sha256, String comment, int dbHandle) throws TskCoreException {
hashDbAddEntryNat(filename, md5, sha1, sha256, comment, dbHandle);
}
/**
 * Adds a batch of hash entries to a hash database inside a single
 * transaction. If any entry fails to insert, the transaction is rolled
 * back and no entries are added.
 *
 * @param hashes   the entries to add
 * @param dbHandle Handle to DB
 *
 * @throws TskCoreException if an insert or the rollback fails
 */
public static void addToHashDatabase(List<HashEntry> hashes, int dbHandle) throws TskCoreException {
hashDbBeginTransactionNat(dbHandle);
try {
for (HashEntry entry : hashes) {
hashDbAddEntryNat(entry.getFileName(), entry.getMd5Hash(), entry.getSha1Hash(), entry.getSha256Hash(), entry.getComment(), dbHandle);
}
hashDbCommitTransactionNat(dbHandle);
} catch (TskCoreException ex) {
try {
hashDbRollbackTransactionNat(dbHandle);
} catch (TskCoreException ex2) {
// If the rollback itself fails, surface the rollback failure but
// keep the original insert failure attached as its cause.
ex2.initCause(ex);
throw ex2;
}
throw ex;
}
}
/**
 * Checks whether a hash database accepts updates (new entries).
 *
 * @param dbHandle previously opened hash db handle
 *
 * @return true if the database can be updated
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static boolean isUpdateableHashDatabase(int dbHandle) throws TskCoreException {
return hashDbIsUpdateableNat(dbHandle);
}
/**
 * Checks whether a hash database consists of an index only (no source
 * database).
 *
 * @param dbHandle previously opened hash db handle
 *
 * @return true if the database is index-only
 *
 * @throws TskCoreException if a critical error occurs within TSK core
 */
public static boolean hashDatabaseIsIndexOnly(int dbHandle) throws TskCoreException {
return hashDbIsIdxOnlyNat(dbHandle);
}
/**
 * Convert this timezone from long to short form Convert timezoneLongForm
 * passed in from long to short form
 *
 * @param timezoneLongForm the long form (e.g., America/New_York)
 *
 * @return the short form (e.g., EST5EDT) string representation, or an empty
 * string if empty long form was passed in
 */
private static String timezoneLongToShort(String timezoneLongForm) {
if (timezoneLongForm == null || timezoneLongForm.isEmpty()) {
return "";
}
String timezoneShortForm;
TimeZone zone = TimeZone.getTimeZone(timezoneLongForm);
// Raw (non-DST) offset from UTC, converted from milliseconds to seconds.
int offset = zone.getRawOffset() / 1000;
int hour = offset / 3600;
// NOTE(review): Java's % keeps the sign of the dividend, so 'min' is
// negative for zones west of UTC with a sub-hour offset (e.g. -3:30),
// producing output like "NST3:-30" - confirm this is the intended format.
int min = (offset % 3600) / 60;
DateFormat dfm = new SimpleDateFormat("z");
dfm.setTimeZone(zone);
boolean hasDaylight = zone.useDaylightTime();
// GregorianCalendar months are 0-based: (2010, 1, 1) is Feb 1 (standard
// time in the northern hemisphere) and (2011, 6, 6) is Jul 6 (DST), so
// 'first' and 'second' sample the standard and daylight abbreviations.
String first = dfm.format(new GregorianCalendar(2010, 1, 1).getTime()).substring(0, 3); // make it only 3 letters code
String second = dfm.format(new GregorianCalendar(2011, 6, 6).getTime()).substring(0, 3); // make it only 3 letters code
// POSIX-style TZ strings negate the offset (e.g. EST5EDT for UTC-5).
int mid = hour * -1;
timezoneShortForm = first + Integer.toString(mid);
if (min != 0) {
timezoneShortForm = timezoneShortForm + ":" + (min < 10 ? "0" : "") + Integer.toString(min);
}
if (hasDaylight) {
timezoneShortForm += second;
}
return timezoneShortForm;
}
/**
 * Fills in any gaps in the image created by image writer.
 *
 * @param imgHandle The image handle.
 *
 * @return 0 if no errors occurred; 1 otherwise.
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 */
public static int finishImageWriter(long imgHandle) throws TskCoreException {
getTSKReadLock();
try {
if(! imgHandleIsValid(imgHandle)) {
throw new TskCoreException("Image handle " + imgHandle + " is closed");
}
return finishImageWriterNat(imgHandle);
} finally {
releaseTSKReadLock();
}
}
/**
 * Get the current progress of the finish image process (0-100)
 *
 * @param imgHandle The image handle.
 *
 * @return Percentage of blocks completed (0-100); 0 if the handle is closed
 */
public static int getFinishImageProgress(long imgHandle) {
getTSKReadLock();
try {
if (imgHandleIsValid(imgHandle)) {
return getFinishImageProgressNat(imgHandle);
} else {
// A closed handle is reported as no progress rather than an error.
return 0;
}
} finally {
releaseTSKReadLock();
}
}
/**
 * Cancel the finish image process. Silently does nothing if the handle is
 * closed.
 *
 * @param imgHandle The image handle.
 */
public static void cancelFinishImage(long imgHandle) {
getTSKReadLock();
try {
if (imgHandleIsValid(imgHandle)) {
cancelFinishImageNat(imgHandle);
}
} finally {
releaseTSKReadLock();
}
}
/**
 * Get size of a device (physical, logical device, image) pointed to by
 * devPath
 *
 * @param devPath device path pointing to the device
 *
 * @return size of the device in bytes
 *
 * @throws TskCoreException exception thrown if the device size could not be
 * queried
 */
public static long findDeviceSize(String devPath) throws TskCoreException {
return findDeviceSizeNat(devPath);
}
/**
 * Tests whether an image at the given path is of a type supported by TSK.
 *
 * @param imagePath path to the image
 *
 * @return true if the image format is supported
 */
public static boolean isImageSupported(String imagePath) {
return isImageSupportedNat(imagePath);
}
/** Get the version of the Sleuthkit code in number form.
 * Upper byte is A, next is B, and next byte is C in version A.B.C.
 * Lowest byte is 0xff, except in beta releases, in which case it
 * increments from 1. Nightly snapshots will have upper byte as
 * 0xff and next bytes with year, month, and date, respectively.
 * Note that you will not be able to differentiate between snapshots
 * from the trunk or branches with this method...
 * For example, 3.1.2 would be stored as 0x030102FF.
 * 3.1.2b1 would be 0x03010201. Snapshot from Jan 2, 2003 would be
 * 0xFF030102.
 *
 * @return the current Sleuthkit version
 */
static long getSleuthkitVersion() {
return getSleuthkitVersionNat();
}
/**
 * Get a read lock for the C++ layer. Do not get this lock after obtaining
 * HandleCache.cacheLock (lock-ordering rule to avoid deadlock).
 */
private static void getTSKReadLock() {
tskLock.readLock().lock();
}
/**
 * Release the read lock.
 */
private static void releaseTSKReadLock() {
tskLock.readLock().unlock();
}
/**
 * Get a write lock for the C++ layer. Do not get this lock after obtaining
 * HandleCache.cacheLock (lock-ordering rule to avoid deadlock).
 *
 * This is a temporary fix for APFS which is not thread-safe. Should be used
 * when accessing anything under a pool.
 */
private static void getTSKWriteLock() {
tskLock.writeLock().lock();
}
/**
 * Release the write lock.
 */
private static void releaseTSKWriteLock() {
tskLock.writeLock().unlock();
}
// Deprecated handle-freeing and open methods, kept for binary compatibility.
/**
 * Frees the imgHandle pointer. Currently does not close the image -
 * imgHandle should only be freed as part of CaseDbHandle.free().
 *
 * @param imgHandle pointer to the image structure in sleuthkit
 */
@Deprecated
public static void closeImg(long imgHandle) {
//closeImgNat(imgHandle);
}
/**
 * Frees the vsHandle pointer - currently does nothing.
 *
 * @param vsHandle pointer to volume system structure in sleuthkit
 */
@Deprecated
public static void closeVs(long vsHandle) {
// closeVsNat(vsHandle); TODO JIRA-3829
}
/**
 * Frees the fsHandle pointer. Currently does not do anything - fsHandle
 * should only be freed as part of CaseDbHandle.free().
 *
 * @param fsHandle pointer to file system structure in sleuthkit
 */
@Deprecated
public static void closeFs(long fsHandle) {
//closeFsNat(fsHandle);
}
/**
 * Open the image and return the image info pointer.
 *
 * @param imageFiles the paths to the images
 *
 * @return the image info pointer
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 * @deprecated Use the version with the SleuthkitCase argument
 */
@Deprecated
public static long openImage(String[] imageFiles) throws TskCoreException {
return openImage(imageFiles, 0, true, null);
}
/**
 * Open the image with a specified sector size and return the image info
 * pointer.
 *
 * @param imageFiles the paths to the images
 * @param sSize the sector size (use '0' for autodetect)
 *
 * @return the image info pointer
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 * @deprecated Use the version with the SleuthkitCase argument
 */
@Deprecated
public static long openImage(String[] imageFiles, int sSize) throws TskCoreException {
return openImage(imageFiles, sSize, true, null);
}
/**
 * Get file system Handle Opened handle is cached (transparently) so it does
 * not need be reopened next time for the duration of the application
 *
 * @param imgHandle pointer to imgHandle in sleuthkit
 * @param fsOffset byte offset to the file system
 *
 * @return pointer to a fsHandle structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 * @deprecated Use the version with the SleuthkitCase argument
 */
@Deprecated
public static long openFs(long imgHandle, long fsOffset) throws TskCoreException {
return openFs(imgHandle, fsOffset, null);
}
/**
 * Get file Handle
 *
 * @param fsHandle fsHandle pointer in the sleuthkit
 * @param fileId id of the file
 * @param attrType file attribute type to open
 * @param attrId file attribute id to open
 *
 * @return pointer to a file structure in the sleuthkit
 *
 * @throws TskCoreException exception thrown if critical error occurs within
 * TSK
 * @deprecated Use the version with the SleuthkitCase argument
 */
@Deprecated
public static long openFile(long fsHandle, long fileId, TSK_FS_ATTR_TYPE_ENUM attrType, int attrId) throws TskCoreException {
return openFile(fsHandle, fileId, attrType, attrId, null);
}
// ---- Native (JNI) method declarations, implemented in the TSK C++ layer ----
// Version / logging
private static native String getVersionNat();
private static native void startVerboseLoggingNat(String logPath);
// Hash database operations
private static native int hashDbOpenNat(String hashDbPath) throws TskCoreException;
private static native int hashDbNewNat(String hashDbPath) throws TskCoreException;
private static native int hashDbBeginTransactionNat(int dbHandle) throws TskCoreException;
private static native int hashDbCommitTransactionNat(int dbHandle) throws TskCoreException;
private static native int hashDbRollbackTransactionNat(int dbHandle) throws TskCoreException;
private static native int hashDbAddEntryNat(String filename, String hashMd5, String hashSha1, String hashSha256, String comment, int dbHandle) throws TskCoreException;
private static native boolean hashDbIsUpdateableNat(int dbHandle);
private static native boolean hashDbIsReindexableNat(int dbHandle);
private static native String hashDbPathNat(int dbHandle);
private static native String hashDbIndexPathNat(int dbHandle);
private static native String hashDbGetDisplayName(int dbHandle) throws TskCoreException;
private static native void hashDbCloseAll() throws TskCoreException;
private static native void hashDbClose(int dbHandle) throws TskCoreException;
private static native void hashDbCreateIndexNat(int dbHandle) throws TskCoreException;
private static native boolean hashDbIndexExistsNat(int dbHandle) throws TskCoreException;
private static native boolean hashDbIsIdxOnlyNat(int dbHandle) throws TskCoreException;
private static native boolean hashDbLookup(String hash, int dbHandle) throws TskCoreException;
private static native HashHitInfo hashDbLookupVerbose(String hash, int dbHandle) throws TskCoreException;
// Add-image process
private static native long initAddImgNat(TskCaseDbBridge dbHelperObj, String timezone, boolean addUnallocSpace, boolean skipFatFsOrphans) throws TskCoreException;
private static native long initializeAddImgNat(TskCaseDbBridge dbHelperObj, String timezone, boolean addFileSystems, boolean addUnallocSpace, boolean skipFatFsOrphans) throws TskCoreException;
private static native void runOpenAndAddImgNat(long process, String deviceId, String[] imgPath, int splits, String timezone) throws TskCoreException, TskDataException;
private static native void runAddImgNat(long process, String deviceId, long a_img_info, long image_id, String timeZone, String imageWriterPath) throws TskCoreException, TskDataException;
private static native void stopAddImgNat(long process) throws TskCoreException;
private static native long finishAddImgNat(long process) throws TskCoreException;
// Open handles to image / volume system / volume / pool / file system / file
private static native long openImgNat(String[] imgPath, int splits, int sSize) throws TskCoreException;
private static native long openVsNat(long imgHandle, long vsOffset) throws TskCoreException;
private static native long openVolNat(long vsHandle, long volId) throws TskCoreException;
private static native long openPoolNat(long imgHandle, long offset) throws TskCoreException;
private static native long getImgInfoForPoolNat(long poolHandle, long poolOffset) throws TskCoreException;
private static native long openFsNat(long imgHandle, long fsId) throws TskCoreException;
private static native long openFileNat(long fsHandle, long fileId, int attrType, int attrId) throws TskCoreException;
// Read operations on the various handle types
private static native int readImgNat(long imgHandle, byte[] readBuffer, long offset, long len) throws TskCoreException;
private static native int readVsNat(long vsHandle, byte[] readBuffer, long offset, long len) throws TskCoreException;
private static native int readPoolNat(long poolHandle, byte[] readBuffer, long offset, long len) throws TskCoreException;
private static native int readVolNat(long volHandle, byte[] readBuffer, long offset, long len) throws TskCoreException;
private static native int readFsNat(long fsHandle, byte[] readBuffer, long offset, long len) throws TskCoreException;
private static native int readFileNat(long fileHandle, byte[] readBuffer, long offset, int offset_type, long len) throws TskCoreException;
private static native int saveFileMetaDataTextNat(long fileHandle, String fileName) throws TskCoreException;
// Image metadata accessors
private static native String[] getPathsForImageNat(long imgHandle);
private static native long getSizeForImageNat(long imgHandle);
private static native long getTypeForImageNat(long imgHandle);
private static native long getSectorSizeForImageNat(long imgHandle);
private static native String getMD5HashForImageNat(long imgHandle);
private static native String getSha1HashForImageNat(long imgHandle);
private static native String getCollectionDetailsForImageNat(long imgHandle);
// Handle cleanup
private static native void closeImgNat(long imgHandle);
private static native void closePoolNat(long poolHandle);
private static native void closeVsNat(long vsHandle);
private static native void closeFsNat(long fsHandle);
private static native void closeFileNat(long fileHandle);
// Miscellaneous
private static native long findDeviceSizeNat(String devicePath) throws TskCoreException;
private static native String getCurDirNat(long process);
private static native boolean isImageSupportedNat(String imagePath);
private static native long getSleuthkitVersionNat();
private static native int finishImageWriterNat(long a_img_info);
private static native int getFinishImageProgressNat(long a_img_info);
private static native void cancelFinishImageNat(long a_img_info);
}
/*
* Sleuth Kit Data Model
*
* Copyright 2011 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
/**
 * Interface for all visitable datatypes that can be found in the tsk database.
 * Implementations dispatch to the type-specific method of the supplied
 * visitor (visitor pattern).
 */
public interface SleuthkitVisitableItem {
/**
 * Accepts a visitor and dispatches to its method for this item's
 * concrete type.
 *
 * @param <T> type returned by the visitor
 * @param v visitor
 *
 * @return visitor return value
 */
public <T> T accept(SleuthkitItemVisitor<T> v);
}
/*
* SleuthKit Java Bindings
*
* Copyright 2011-2022 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.util.Collections;
import java.util.List;
import org.sleuthkit.datamodel.TskData.FileKnown;
import org.sleuthkit.datamodel.TskData.TSK_FS_META_TYPE_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_FLAG_ENUM;
import org.sleuthkit.datamodel.TskData.TSK_FS_NAME_TYPE_ENUM;
/**
 * Parent class for special directory types (Local and Virtual). Forwards all
 * file metadata positionally to the AbstractFile constructor.
 */
public abstract class SpecialDirectory extends AbstractFile {
SpecialDirectory(SleuthkitCase db,
long objId,
long dataSourceObjectId,
Long fileSystemObjectId,
TskData.TSK_FS_ATTR_TYPE_ENUM attrType, int attrId,
String name,
TskData.TSK_DB_FILES_TYPE_ENUM fileType,
long metaAddr, int metaSeq,
TSK_FS_NAME_TYPE_ENUM dirType, TSK_FS_META_TYPE_ENUM metaType,
TSK_FS_NAME_FLAG_ENUM dirFlag, short metaFlags,
long size,
long ctime, long crtime, long atime, long mtime,
short modes,
int uid, int gid,
String md5Hash, String sha256Hash, String sha1Hash,
FileKnown knownState,
String parentPath,
String mimeType) {
// Special directories have no extension, no owner, no OS account, and an
// unknown collected status; those trailing arguments are fixed here.
super(db, objId, dataSourceObjectId, fileSystemObjectId, attrType, attrId, name,
fileType, metaAddr, metaSeq, dirType, metaType, dirFlag,
metaFlags, size, ctime, crtime, atime, mtime, modes, uid, gid, md5Hash, sha256Hash, sha1Hash, knownState, parentPath, mimeType, null, OsAccount.NO_OWNER_ID, OsAccount.NO_ACCOUNT, TskData.CollectedStatus.UNKNOWN, Collections.emptyList());
}
/**
 * Gets the extents in terms of byte addresses of this directory
 * within its data source, always an empty list.
 *
 * @return An empty list.
 *
 * @throws TskCoreException if there was an error querying the case
 * database.
 */
@Override
public List<TskFileRange> getRanges() throws TskCoreException {
return Collections.<TskFileRange>emptyList();
}
/**
 * Indicates whether or not this is a data source.
 *
 * @return True if this directory's object ID equals its data source's
 * object ID, false otherwise.
 */
public boolean isDataSource() {
return (this.getDataSourceObjectId() == this.getId());
}
/**
 * Does nothing, a special directory cannot be opened, read, or closed.
 */
@Override
public void close() {
}
/**
 * Indicates whether or not this directory is the root of a file
 * system, always returns false.
 *
 * @return False.
 */
@Override
public boolean isRoot() {
return false;
}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2013 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
/**
 * Base class for data transfer object (DTO) classes that model tags applied
 * to content and blackboard artifacts by users.
 */
public abstract class Tag {

	static long ID_NOT_SET = -1;
	private long tagID = ID_NOT_SET;
	private final TagName name;
	private final String comment;
	private final String userName;

	Tag(long tagID, TagName name, String comment, String userName) {
		this.tagID = tagID;
		this.name = name;
		this.comment = comment;
		this.userName = userName;
	}

	/**
	 * Gets the ID of this tag (unique amongst tags).
	 *
	 * @return the tag ID
	 */
	public long getId() {
		return tagID;
	}

	/**
	 * Gets the tag name (definition) applied by this tag.
	 *
	 * @return the tag name
	 */
	public TagName getName() {
		return name;
	}

	/**
	 * Gets the user-supplied comment attached to this tag.
	 *
	 * @return the comment
	 */
	public String getComment() {
		return comment;
	}

	/**
	 * Gets the name of the user who created this tag.
	 *
	 * @return the user name, or an empty string if none was recorded
	 */
	public String getUserName() {
		if (userName == null) {
			return "";
		}
		return userName;
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2013-2020 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Objects;
/**
 * Instances of this class are data transfer objects (DTOs) that represent the
 * names (and related properties) a user can select from to apply a tag to
 * content or a blackboard artifact.
 */
public class TagName implements Comparable<TagName>, Serializable {

	private static final long serialVersionUID = 1L;

	/**
	 * Display colors a tag name can be assigned, with their HTML hex values.
	 */
	public enum HTML_COLOR {
		NONE("None", ""), //NON-NLS
		WHITE("White", "#FFFFFF"), //NON-NLS
		SILVER("Silver", "#C0C0C0"), //NON-NLS
		GRAY("Gray", "#808080"), //NON-NLS
		BLACK("Black", "#000000"), //NON-NLS
		RED("Red", "#FF0000"), //NON-NLS
		// NOTE(review): display name is misspelled ("Maron"). Left unchanged
		// because getColorByName() lookups and any persisted color names
		// depend on the existing string.
		MAROON("Maron", "#800000"), //NON-NLS
		YELLOW("Yellow", "#FFFF00"), //NON-NLS
		OLIVE("Olive", "#808000"), //NON-NLS
		LIME("Lime", "#00FF00"), //NON-NLS
		GREEN("Green", "#008000"), //NON-NLS
		AQUA("Aqua", "#00FFFF"), //NON-NLS
		TEAL("Teal", "#008080"), //NON-NLS
		BLUE("Blue", "#0000FF"), //NON-NLS
		NAVY("Navy", "#000080"), //NON-NLS
		FUCHSIA("Fuchsia", "#FF00FF"), //NON-NLS
		PURPLE("Purple", "#800080"); //NON-NLS

		// Lookup table from display name to enum constant.
		private final static HashMap<String, HTML_COLOR> colorMap = new HashMap<String, HTML_COLOR>();
		private final String name;
		private final String hexString;

		static {
			for (HTML_COLOR color : HTML_COLOR.values()) {
				colorMap.put(color.getName(), color);
			}
		}

		HTML_COLOR(String name, String hexString) {
			this.hexString = hexString;
			this.name = name;
		}

		String getName() {
			return name;
		}

		/**
		 * Gets the HTML hex string for this color (empty for NONE).
		 *
		 * @return the hex string, e.g. "#FF0000"
		 */
		public String getRgbValue() {
			return hexString;
		}

		/**
		 * Looks up a color by its display name.
		 *
		 * @param colorName the display name to look up
		 *
		 * @return the matching color, or NONE if there is no match
		 */
		public static HTML_COLOR getColorByName(String colorName) {
			if (colorMap.containsKey(colorName)) {
				return colorMap.get(colorName);
			} else {
				return NONE;
			}
		}
	}

	private final long id;
	private final String displayName;
	private final String description;
	private final HTML_COLOR color;
	private final TskData.FileKnown knownStatus;
	private final long tagSetId;
	private final int rank;

	// Clients of the org.sleuthkit.datamodel package should not directly create these objects.
	TagName(long id, String displayName, String description, HTML_COLOR color, TskData.FileKnown knownStatus, long tagSetId, int rank) {
		this.id = id;
		this.displayName = displayName;
		this.description = description;
		this.color = color;
		this.knownStatus = knownStatus;
		this.tagSetId = tagSetId;
		this.rank = rank;
	}

	public long getId() {
		return id;
	}

	public String getDisplayName() {
		return displayName;
	}

	public String getDescription() {
		return description;
	}

	public HTML_COLOR getColor() {
		return color;
	}

	public TskData.FileKnown getKnownStatus() {
		return knownStatus;
	}

	long getTagSetId() {
		return tagSetId;
	}

	public int getRank() {
		return rank;
	}

	/**
	 * Compares two TagName objects by comparing their display names.
	 *
	 * @param other The other TagName to compare this TagName to
	 *
	 * @return the result of calling compareTo on the displayNames
	 */
	@Override
	public int compareTo(TagName other) {
		return this.getDisplayName().compareTo(other.getDisplayName());
	}

	@Override
	public int hashCode() {
		int hash = 5;
		hash = 89 * hash + (int) (this.id ^ (this.id >>> 32));
		hash = 89 * hash + (this.displayName != null ? this.displayName.hashCode() : 0);
		hash = 89 * hash + (this.description != null ? this.description.hashCode() : 0);
		hash = 89 * hash + (this.color != null ? this.color.hashCode() : 0);
		hash = 89 * hash + (this.knownStatus != null ? this.knownStatus.hashCode() : 0);
		// Fixed: previously mixed this.id with the high bits of tagSetId,
		// which was a copy-paste error; tagSetId must be folded with itself.
		hash = 89 * hash + (int) (this.tagSetId ^ (this.tagSetId >>> 32));
		return hash;
	}

	/**
	 * Equality is based on id, displayName, description, color, knownStatus
	 * and tagSetId; rank is intentionally excluded (consistent with hashCode).
	 */
	@Override
	public boolean equals(Object obj) {
		if (obj == this) {
			return true;
		}
		if (obj == null) {
			return false;
		}
		if (getClass() != obj.getClass()) {
			return false;
		}
		final TagName other = (TagName) obj;
		return (this.id == other.getId()
				&& Objects.equals(this.displayName, other.getDisplayName())
				&& Objects.equals(this.description, other.getDescription())
				&& Objects.equals(this.color, other.getColor())
				&& Objects.equals(this.knownStatus, other.getKnownStatus())
				&& this.tagSetId == other.getTagSetId());
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2020 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
/**
* A TagSet is a named group of TagNames.
*/
/**
 * A TagSet is a named group of TagNames.
 *
 * Instances are immutable: the TagName list is defensively copied at
 * construction and exposed only as an unmodifiable view.
 */
public class TagSet {

	/*
	 * Sorts TagNames by rank, then by display name. Declared as a static
	 * constant instead of the original non-static inner Comparator class,
	 * which carried a hidden reference to its enclosing TagSet instance.
	 */
	private static final Comparator<TagName> TAG_NAME_COMPARATOR
			= Comparator.comparingInt(TagName::getRank).thenComparing(TagName::getDisplayName);

	private final String setName;
	private final long id;
	private final List<TagName> tagNameList;

	/**
	 * Construct a TagSet.
	 *
	 * @param id          Tag set id value.
	 * @param setName     Name of tag set; must be a non-empty string.
	 * @param tagNameList TagNames belonging to the set; must be non-null.
	 */
	TagSet(long id, String setName, List<TagName> tagNameList) {
		if (setName == null || setName.isEmpty()) {
			throw new IllegalArgumentException("TagSet name must be a non-empty string");
		}
		// Defensive copy, kept sorted by rank then display name.
		this.tagNameList = new ArrayList<>(Objects.requireNonNull(tagNameList, "tagNameList"));
		this.tagNameList.sort(TAG_NAME_COMPARATOR);
		this.id = id;
		this.setName = setName;
	}

	/**
	 * Returns the name of the tag set.
	 *
	 * @return Tag set name.
	 */
	public String getName() {
		return setName;
	}

	/**
	 * Returns a list of the TagName objects that belong to the tag set.
	 *
	 * @return An unmodifiable list of TagName objects.
	 */
	public List<TagName> getTagNames() {
		return Collections.unmodifiableList(tagNameList);
	}

	/**
	 * Return the TagSet id.
	 *
	 * @return TagSet id value.
	 */
	public long getId() {
		return id;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		final TagSet other = (TagSet) obj;
		return (this.id == other.getId()
				&& setName.equals(other.getName())
				&& tagNameList.equals(other.tagNameList));
	}

	@Override
	public int hashCode() {
		int hash = 5;
		hash = 89 * hash + (int) (this.id ^ (this.id >>> 32));
		hash = 89 * hash + Objects.hashCode(this.setName);
		hash = 89 * hash + Objects.hashCode(this.tagNameList);
		return hash;
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2020-2021 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbConnection;
import org.sleuthkit.datamodel.SleuthkitCase.CaseDbTransaction;
import static org.sleuthkit.datamodel.TskData.DbType.POSTGRESQL;
import org.sleuthkit.datamodel.TskEvent.TagNamesAddedTskEvent;
import org.sleuthkit.datamodel.TskEvent.TagNamesDeletedTskEvent;
import org.sleuthkit.datamodel.TskEvent.TagNamesUpdatedTskEvent;
import org.sleuthkit.datamodel.TskEvent.TagSetsAddedTskEvent;
import org.sleuthkit.datamodel.TskEvent.TagSetsDeletedTskEvent;
/**
* Provides an API to manage Tags.
*/
/**
 * Provides an API to manage Tags.
 */
public class TaggingManager {

	private final SleuthkitCase skCase;

	/**
	 * Construct a TaggingManager for the given SleuthkitCase.
	 *
	 * @param skCase The SleuthkitCase.
	 */
	TaggingManager(SleuthkitCase skCase) {
		this.skCase = skCase;
	}

	/**
	 * Returns a list of all the TagSets that exist in the case.
	 *
	 * @return A List of TagSet objects or an empty list if none were found.
	 *
	 * @throws TskCoreException
	 */
	public List<TagSet> getTagSets() throws TskCoreException {
		List<TagSet> tagSetList = new ArrayList<>();
		skCase.acquireSingleUserCaseReadLock();
		String getAllTagSetsQuery = "SELECT * FROM tsk_tag_sets";
		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(getAllTagSetsQuery)) {
			while (resultSet.next()) {
				int setID = resultSet.getInt("tag_set_id");
				String setName = resultSet.getString("name");
				TagSet set = new TagSet(setID, setName, getTagNamesByTagSetID(setID));
				tagSetList.add(set);
			}
		} catch (SQLException ex) {
			throw new TskCoreException("Error occurred getting TagSet list.", ex);
		} finally {
			skCase.releaseSingleUserCaseReadLock();
		}
		return tagSetList;
	}

	/**
	 * Inserts a row into the tsk_tag_sets table in the case database and moves
	 * the given TagNames into the new set.
	 *
	 * @param name     The tag set name; must be a non-empty string.
	 * @param tagNames The TagNames to add to the set, in rank order; may be
	 *                 null or empty.
	 *
	 * @return A TagSet object for the new row.
	 *
	 * @throws TskCoreException
	 */
	public TagSet addTagSet(String name, List<TagName> tagNames) throws TskCoreException {
		if (name == null || name.isEmpty()) {
			throw new IllegalArgumentException("Error adding TagSet, TagSet name must be non-empty string.");
		}
		TagSet tagSet = null;
		CaseDbTransaction trans = skCase.beginTransaction();
		try {
			// Security fix: the set name is caller-supplied text, so bind it
			// as a parameter instead of concatenating it into the SQL (the
			// old code also failed on names containing single quotes).
			// NOTE(review): the prepared statement is assumed to be managed
			// by the CaseDbConnection and is deliberately not closed here;
			// confirm against CaseDbConnection.getPreparedStatement().
			PreparedStatement insertStmt = trans.getConnection().getPreparedStatement("INSERT INTO tsk_tag_sets (name) VALUES (?)", Statement.RETURN_GENERATED_KEYS);
			insertStmt.clearParameters();
			insertStmt.setString(1, name);
			insertStmt.executeUpdate();
			try (ResultSet resultSet = insertStmt.getGeneratedKeys()) {
				resultSet.next();
				int setID = resultSet.getInt(1);
				List<TagName> updatedTags = new ArrayList<>();
				if (tagNames != null) {
					// Move each TagName into the new set; a TagName's rank is
					// its position in the caller-supplied list. All values in
					// these UPDATEs are numeric, so String.format is safe.
					try (Statement stmt = trans.getConnection().createStatement()) {
						for (int index = 0; index < tagNames.size(); index++) {
							TagName tagName = tagNames.get(index);
							stmt.executeUpdate(String.format("UPDATE tag_names SET tag_set_id = %d, rank = %d WHERE tag_name_id = %d", setID, index, tagName.getId()));
							updatedTags.add(new TagName(tagName.getId(),
									tagName.getDisplayName(),
									tagName.getDescription(),
									tagName.getColor(),
									tagName.getKnownStatus(),
									setID,
									index));
						}
					}
				}
				tagSet = new TagSet(setID, name, updatedTags);
				// NOTE(review): these events are fired before commit here, but
				// after commit in deleteTagSet(); confirm the intended order.
				skCase.fireTSKEvent(new TagSetsAddedTskEvent(Collections.singletonList(tagSet)));
				skCase.fireTSKEvent(new TagNamesUpdatedTskEvent(updatedTags));
			}
			trans.commit();
		} catch (SQLException ex) {
			trans.rollback();
			throw new TskCoreException(String.format("Error adding tag set %s", name), ex);
		}
		return tagSet;
	}

	/**
	 * Remove a row from the tag set table. If the given TagSet has a valid
	 * list of TagNames the TagNames will be removed from the tag_names table
	 * if there are no references to the TagNames in the content_tags or
	 * blackboard_artifact_tags tables.
	 *
	 * @param tagSet TagSet to be deleted.
	 *
	 * @throws TskCoreException
	 */
	public void deleteTagSet(TagSet tagSet) throws TskCoreException {
		if (tagSet == null) {
			// Bug fix: message previously read "Error adding deleting TagSet".
			throw new IllegalArgumentException("Error deleting TagSet, TagSet object was null");
		}
		if (isTagSetInUse(tagSet)) {
			// Bug fix: the %d placeholder was never filled in; format the id in.
			throw new TskCoreException(String.format("Unable to delete TagSet (%d). TagSet TagName list contains TagNames that are currently in use.", tagSet.getId()));
		}
		CaseDbTransaction trans = skCase.beginTransaction();
		try (Statement stmt = trans.getConnection().createStatement()) {
			// Simplification: the original "IN (SELECT tag_name_id FROM
			// tag_names WHERE tag_set_id = %d)" subquery selected from the
			// same table being deleted from; delete directly by set id. Also
			// dropped the quotes around the numeric id in the second DELETE.
			stmt.execute(String.format("DELETE FROM tag_names WHERE tag_set_id = %d", tagSet.getId()));
			stmt.execute(String.format("DELETE FROM tsk_tag_sets WHERE tag_set_id = %d", tagSet.getId()));
			trans.commit();
			List<Long> tagNameIds = new ArrayList<>();
			for (TagName tagName : tagSet.getTagNames()) {
				tagNameIds.add(tagName.getId());
			}
			skCase.fireTSKEvent(new TagSetsDeletedTskEvent(Collections.singletonList(tagSet.getId())));
			skCase.fireTSKEvent(new TagNamesDeletedTskEvent(tagNameIds));
		} catch (SQLException ex) {
			trans.rollback();
			throw new TskCoreException(String.format("Error deleting tag set where id = %d.", tagSet.getId()), ex);
		}
	}

	/**
	 * Gets the tag set a tag name (tag definition) belongs to, if any.
	 *
	 * @param tagName The tag name.
	 *
	 * @return A TagSet object or null.
	 *
	 * @throws TskCoreException If there is an error querying the case database.
	 */
	public TagSet getTagSet(TagName tagName) throws TskCoreException {
		if (tagName == null) {
			throw new IllegalArgumentException("Null tagName argument");
		}
		// A non-positive tag set id means the TagName has no set.
		if (tagName.getTagSetId() <= 0) {
			return null;
		}
		skCase.acquireSingleUserCaseReadLock();
		TagSet tagSet = null;
		String sqlQuery = String.format("SELECT * FROM tsk_tag_sets WHERE tag_set_id = %d", tagName.getTagSetId());
		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(sqlQuery)) {
			if (resultSet.next()) {
				int setID = resultSet.getInt("tag_set_id");
				String setName = resultSet.getString("name");
				tagSet = new TagSet(setID, setName, getTagNamesByTagSetID(setID));
			}
			return tagSet;
		} catch (SQLException ex) {
			throw new TskCoreException(String.format("Error occurred getting TagSet for TagName '%s' (ID=%d)", tagName.getDisplayName(), tagName.getId()), ex);
		} finally {
			skCase.releaseSingleUserCaseReadLock();
		}
	}

	/**
	 * Return a TagSet object for the given id.
	 *
	 * @param id TagSet id.
	 *
	 * @return The TagSet represented by the given id, or null if one was not
	 *         found.
	 *
	 * @throws TskCoreException
	 */
	public TagSet getTagSet(long id) throws TskCoreException {
		TagSet tagSet = null;
		String preparedQuery = "Select * FROM tsk_tag_sets WHERE tag_set_id = ?";
		skCase.acquireSingleUserCaseReadLock();
		try (CaseDbConnection connection = skCase.getConnection(); PreparedStatement statement = connection.getPreparedStatement(preparedQuery, Statement.NO_GENERATED_KEYS)) {
			statement.setLong(1, id);
			try (ResultSet resultSet = statement.executeQuery()) {
				if (resultSet.next()) {
					int setID = resultSet.getInt("tag_set_id");
					String setName = resultSet.getString("name");
					tagSet = new TagSet(setID, setName, getTagNamesByTagSetID(setID));
				}
			}
		} catch (SQLException ex) {
			throw new TskCoreException(String.format("Error occurred getting TagSet (ID=%d)", id), ex);
		} finally {
			skCase.releaseSingleUserCaseReadLock();
		}
		return tagSet;
	}

	/**
	 * Inserts a row into the blackboard_artifact_tags table in the case
	 * database. If the TagName belongs to a TagSet, any existing tags on the
	 * artifact from the same set are removed first.
	 *
	 * @param artifact The blackboard artifact to tag.
	 * @param tagName  The name to use for the tag.
	 * @param comment  A comment to store with the tag.
	 *
	 * @return A BlackboardArtifactTagChange holding the added tag and any
	 *         same-set tags that were removed.
	 *
	 * @throws TskCoreException
	 */
	public BlackboardArtifactTagChange addArtifactTag(BlackboardArtifact artifact, TagName tagName, String comment) throws TskCoreException {
		if (artifact == null || tagName == null) {
			throw new IllegalArgumentException("NULL argument passed to addArtifactTag");
		}
		List<BlackboardArtifactTag> removedTags = new ArrayList<>();
		List<String> removedTagIds = new ArrayList<>();
		CaseDbTransaction trans = null;
		try {
			// If a TagName is part of a TagSet remove any existing tags from
			// the set that are currently on the artifact.
			long tagSetId = tagName.getTagSetId();
			if (tagSetId > 0) {
				// Get the list of all of the blackboardArtifactTags that use
				// a TagName from the same set for the given artifact.
				String selectQuery = String.format("SELECT * from blackboard_artifact_tags JOIN tag_names ON tag_names.tag_name_id = blackboard_artifact_tags.tag_name_id JOIN tsk_examiners on tsk_examiners.examiner_id = blackboard_artifact_tags.examiner_id WHERE artifact_id = %d AND tag_names.tag_set_id = %d", artifact.getArtifactID(), tagSetId);
				TagName removedTag;
				// Bug fix: the CaseDbConnection obtained here was never
				// closed (only the Statement was); it now participates in
				// the try-with-resources.
				try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
					while (resultSet.next()) {
						removedTag = new TagName(
								resultSet.getLong("tag_name_id"),
								resultSet.getString("display_name"),
								resultSet.getString("description"),
								TagName.HTML_COLOR.getColorByName(resultSet.getString("color")),
								TskData.FileKnown.valueOf(resultSet.getByte("knownStatus")),
								tagSetId,
								resultSet.getInt("rank")
						);
						BlackboardArtifactTag bat
								= new BlackboardArtifactTag(resultSet.getLong("tag_id"),
										artifact,
										skCase.getContentById(artifact.getObjectID()),
										removedTag,
										resultSet.getString("comment"),
										resultSet.getString("login_name"));
						removedTags.add(bat);
						removedTagIds.add(Long.toString(bat.getId()));
					}
				}
			}
			Content content = skCase.getContentById(artifact.getObjectID());
			Examiner currentExaminer = skCase.getCurrentExaminer();
			trans = skCase.beginTransaction();
			CaseDbConnection connection = trans.getConnection();
			if (!removedTags.isEmpty()) {
				// Remove the superseded tags. The ids are numeric, so this
				// string-built IN clause is injection-safe.
				String removeQuery = String.format("DELETE FROM blackboard_artifact_tags WHERE tag_id IN (%s)", String.join(",", removedTagIds));
				try (Statement stmt = connection.createStatement()) {
					stmt.executeUpdate(removeQuery);
				}
			}
			// Add the new tag. Security fix: the comment is caller-supplied
			// text, so bind it as a parameter rather than concatenating it
			// into the SQL (the old code also failed on comments containing
			// single quotes).
			BlackboardArtifactTag artifactTag;
			PreparedStatement insertStmt = connection.getPreparedStatement("INSERT INTO blackboard_artifact_tags (artifact_id, tag_name_id, comment, examiner_id) VALUES (?, ?, ?, ?)", Statement.RETURN_GENERATED_KEYS);
			insertStmt.clearParameters();
			insertStmt.setLong(1, artifact.getArtifactID());
			insertStmt.setLong(2, tagName.getId());
			insertStmt.setString(3, comment);
			insertStmt.setLong(4, currentExaminer.getId());
			insertStmt.executeUpdate();
			try (ResultSet resultSet = insertStmt.getGeneratedKeys()) {
				resultSet.next();
				artifactTag = new BlackboardArtifactTag(resultSet.getLong(1), //last_insert_rowid()
						artifact, content, tagName, comment, currentExaminer.getLoginName());
			}
			// A tagged item affects scoring; refresh the aggregate score in
			// the same transaction.
			skCase.getScoringManager().updateAggregateScoreAfterAddition(
					artifact.getId(), artifact.getDataSourceObjectID(), getTagScore(tagName.getKnownStatus()), trans);
			trans.commit();
			return new BlackboardArtifactTagChange(artifactTag, removedTags);
		} catch (SQLException ex) {
			if (trans != null) {
				trans.rollback();
			}
			throw new TskCoreException("Error adding row to blackboard_artifact_tags table (obj_id = " + artifact.getArtifactID() + ", tag_name_id = " + tagName.getId() + ")", ex);
		}
	}

	/**
	 * Translates the known status of a tag definition into an item score. This
	 * supports scoring of tagged items.
	 *
	 * @param knownStatus The known status of a tag definition.
	 *
	 * @return The corresponding item score.
	 */
	static Score getTagScore(TskData.FileKnown knownStatus) {
		switch (knownStatus) {
			case BAD:
				/*
				 * The "bad" known status is used to define tags that are
				 * "notable." An item tagged with a "notable" tag is scored as
				 * notable.
				 */
				return Score.SCORE_NOTABLE;
			case UNKNOWN:
			case KNOWN:
			default: // N/A
				/*
				 * All other known status values have no special significance in
				 * a tag definition. However, if an item has been tagged at all
				 * by a user, the item is scored as likely notable.
				 */
				return Score.SCORE_LIKELY_NOTABLE;
		}
	}

	/**
	 * Retrieves the maximum FileKnown status of any tag associated with the
	 * object id.
	 *
	 * @param objectId    The object id of the item.
	 * @param transaction The case db transaction to perform this query.
	 *
	 * @return The maximum FileKnown status for this object or empty.
	 *
	 * @throws TskCoreException
	 */
	Optional<TskData.FileKnown> getMaxTagKnownStatus(long objectId, CaseDbTransaction transaction) throws TskCoreException {
		// Query content tags and blackboard artifact tags for the highest
		// known status associated with a tag associated with this object id.
		String queryString = "SELECT tag_names.knownStatus AS knownStatus\n"
				+ " FROM (\n"
				+ " SELECT ctags.tag_name_id AS tag_name_id FROM content_tags ctags WHERE ctags.obj_id = " + objectId + "\n"
				+ " UNION\n"
				+ " SELECT btags.tag_name_id AS tag_name_id FROM blackboard_artifact_tags btags \n"
				+ " INNER JOIN blackboard_artifacts ba ON btags.artifact_id = ba.artifact_id\n"
				+ " WHERE ba.artifact_obj_id = " + objectId + "\n"
				+ " ) tag_name_ids\n"
				+ " INNER JOIN tag_names ON tag_name_ids.tag_name_id = tag_names.tag_name_id\n"
				+ " ORDER BY tag_names.knownStatus DESC\n"
				+ " LIMIT 1";
		try (Statement statement = transaction.getConnection().createStatement();
				ResultSet resultSet = transaction.getConnection().executeQuery(statement, queryString)) {
			if (resultSet.next()) {
				return Optional.ofNullable(TskData.FileKnown.valueOf(resultSet.getByte("knownStatus")));
			} else {
				return Optional.empty();
			}
		} catch (SQLException ex) {
			// Bug fix: the SQLException is now chained as the cause instead
			// of being dropped.
			throw new TskCoreException("Error getting content tag FileKnown status for content with id: " + objectId, ex);
		}
	}

	/**
	 * Inserts a row into the content_tags table in the case database. If the
	 * TagName belongs to a TagSet, any existing tags on the content from the
	 * same set are removed first.
	 *
	 * @param content         The content to tag.
	 * @param tagName         The name to use for the tag.
	 * @param comment         A comment to store with the tag.
	 * @param beginByteOffset Designates the beginning of a tagged section.
	 * @param endByteOffset   Designates the end of a tagged section.
	 *
	 * @return A ContentTagChange holding the added tag and any same-set tags
	 *         that were removed.
	 *
	 * @throws TskCoreException
	 */
	public ContentTagChange addContentTag(Content content, TagName tagName, String comment, long beginByteOffset, long endByteOffset) throws TskCoreException {
		List<ContentTag> removedTags = new ArrayList<>();
		List<String> removedTagIds = new ArrayList<>();
		Examiner currentExaminer = skCase.getCurrentExaminer();
		CaseDbTransaction trans = skCase.beginTransaction();
		CaseDbConnection connection = trans.getConnection();
		try {
			long tagSetId = tagName.getTagSetId();
			if (tagSetId > 0) {
				String selectQuery = String.format("SELECT * from content_tags JOIN tag_names ON tag_names.tag_name_id = content_tags.tag_name_id JOIN tsk_examiners on tsk_examiners.examiner_id = content_tags.examiner_id WHERE obj_id = %d AND tag_names.tag_set_id = %d", content.getId(), tagSetId);
				try (Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(selectQuery)) {
					while (resultSet.next()) {
						TagName removedTag = new TagName(
								resultSet.getLong("tag_name_id"),
								resultSet.getString("display_name"),
								resultSet.getString("description"),
								TagName.HTML_COLOR.getColorByName(resultSet.getString("color")),
								TskData.FileKnown.valueOf(resultSet.getByte("knownStatus")),
								tagSetId,
								resultSet.getInt("rank")
						);
						ContentTag bat
								= new ContentTag(resultSet.getLong("tag_id"),
										content,
										removedTag,
										resultSet.getString("comment"),
										resultSet.getLong("begin_byte_offset"),
										resultSet.getLong("end_byte_offset"),
										resultSet.getString("login_name"));
						removedTagIds.add(Long.toString(bat.getId()));
						removedTags.add(bat);
					}
				}
				if (!removedTags.isEmpty()) {
					// The ids are numeric, so this string-built IN clause is
					// injection-safe.
					String removeQuery = String.format("DELETE FROM content_tags WHERE tag_id IN (%s)", String.join(",", removedTagIds));
					try (Statement stmt = connection.createStatement()) {
						stmt.executeUpdate(removeQuery);
					}
				}
			}
			// Add the new tag. Security fix: the comment is caller-supplied
			// text, so bind it as a parameter rather than concatenating it
			// into the SQL.
			ContentTag contentTag;
			PreparedStatement insertStmt = connection.getPreparedStatement("INSERT INTO content_tags (obj_id, tag_name_id, comment, begin_byte_offset, end_byte_offset, examiner_id) VALUES (?, ?, ?, ?, ?, ?)", Statement.RETURN_GENERATED_KEYS);
			insertStmt.clearParameters();
			insertStmt.setLong(1, content.getId());
			insertStmt.setLong(2, tagName.getId());
			insertStmt.setString(3, comment);
			insertStmt.setLong(4, beginByteOffset);
			insertStmt.setLong(5, endByteOffset);
			insertStmt.setLong(6, currentExaminer.getId());
			insertStmt.executeUpdate();
			try (ResultSet resultSet = insertStmt.getGeneratedKeys()) {
				resultSet.next();
				contentTag = new ContentTag(resultSet.getLong(1), //last_insert_rowid()
						content, tagName, comment, beginByteOffset, endByteOffset, currentExaminer.getLoginName());
			}
			Long dataSourceId = content.getDataSource() != null ? content.getDataSource().getId() : null;
			skCase.getScoringManager().updateAggregateScoreAfterAddition(
					content.getId(), dataSourceId, getTagScore(tagName.getKnownStatus()), trans);
			trans.commit();
			return new ContentTagChange(contentTag, removedTags);
		} catch (SQLException ex) {
			trans.rollback();
			throw new TskCoreException("Error adding row to content_tags table (obj_id = " + content.getId() + ", tag_name_id = " + tagName.getId() + ")", ex);
		}
	}

	/**
	 * Inserts row into the tag_names table, or updates the existing row if the
	 * displayName already exists in the tag_names table in the case database.
	 *
	 * @param displayName The display name for the new tag name.
	 * @param description The description for the new tag name.
	 * @param color       The HTML color to associate with the new tag name.
	 * @param knownStatus The TskData.FileKnown value to associate with the new
	 *                    tag name.
	 *
	 * @return A TagName data transfer object (DTO) for the new row.
	 *
	 * @throws TskCoreException
	 */
	public TagName addOrUpdateTagName(String displayName, String description, TagName.HTML_COLOR color, TskData.FileKnown knownStatus) throws TskCoreException {
		String insertQuery = "INSERT INTO tag_names (display_name, description, color, knownStatus) VALUES (?, ?, ?, ?) ON CONFLICT (display_name) DO UPDATE SET description = ?, color = ?, knownStatus = ?";
		boolean isUpdated = false;
		skCase.acquireSingleUserCaseWriteLock();
		try (CaseDbConnection connection = skCase.getConnection()) {
			// Check up front whether the row exists so the right event
			// (added vs. updated) can be fired afterwards.
			try (PreparedStatement statement = connection.getPreparedStatement("SELECT * FROM tag_names WHERE display_name = ?", Statement.NO_GENERATED_KEYS)) {
				statement.setString(1, displayName);
				try (ResultSet resultSet = statement.executeQuery()) {
					isUpdated = resultSet.next();
				}
			}
			// Parameters 1-4 feed the INSERT; 5-7 feed the DO UPDATE clause.
			try (PreparedStatement statement = connection.getPreparedStatement(insertQuery, Statement.RETURN_GENERATED_KEYS)) {
				statement.clearParameters();
				statement.setString(5, description);
				statement.setString(6, color.getName());
				statement.setByte(7, knownStatus.getFileKnownValue());
				statement.setString(1, displayName);
				statement.setString(2, description);
				statement.setString(3, color.getName());
				statement.setByte(4, knownStatus.getFileKnownValue());
				statement.executeUpdate();
			}
			// Re-read the row to pick up the generated id, tag_set_id and rank.
			try (PreparedStatement statement = connection.getPreparedStatement("SELECT * FROM tag_names where display_name = ?", Statement.NO_GENERATED_KEYS)) {
				statement.setString(1, displayName);
				try (ResultSet resultSet = connection.executeQuery(statement)) {
					resultSet.next();
					TagName newTag = new TagName(resultSet.getLong("tag_name_id"), displayName, description, color, knownStatus, resultSet.getLong("tag_set_id"), resultSet.getInt("rank"));
					if (!isUpdated) {
						skCase.fireTSKEvent(new TagNamesAddedTskEvent(Collections.singletonList(newTag)));
					} else {
						skCase.fireTSKEvent(new TagNamesUpdatedTskEvent(Collections.singletonList(newTag)));
					}
					return newTag;
				}
			}
		} catch (SQLException ex) {
			throw new TskCoreException("Error adding row for " + displayName + " tag name to tag_names table", ex);
		} finally {
			skCase.releaseSingleUserCaseWriteLock();
		}
	}

	/**
	 * Return the TagName object for the given id.
	 *
	 * @param id The TagName id.
	 *
	 * @return The TagName object for the given id, or null if none was found.
	 *
	 * @throws TskCoreException
	 */
	public TagName getTagName(long id) throws TskCoreException {
		String preparedQuery = "SELECT * FROM tag_names where tag_name_id = ?";
		skCase.acquireSingleUserCaseReadLock();
		try (CaseDbConnection connection = skCase.getConnection()) {
			try (PreparedStatement statement = connection.getPreparedStatement(preparedQuery, Statement.NO_GENERATED_KEYS)) {
				statement.clearParameters();
				statement.setLong(1, id);
				try (ResultSet resultSet = statement.executeQuery()) {
					if (resultSet.next()) {
						return new TagName(resultSet.getLong("tag_name_id"),
								resultSet.getString("display_name"),
								resultSet.getString("description"),
								TagName.HTML_COLOR.getColorByName(resultSet.getString("color")),
								// Bug fix: the column name was misspelled
								// "knowStatus"; every other query in this
								// class reads "knownStatus".
								TskData.FileKnown.valueOf(resultSet.getByte("knownStatus")),
								resultSet.getLong("tag_set_id"),
								resultSet.getInt("rank"));
					}
				}
			}
		} catch (SQLException ex) {
			// Bug fix: the exception message was previously empty.
			throw new TskCoreException(String.format("Error getting TagName (tag_name_id = %d)", id), ex);
		} finally {
			// Bug fix: this method acquires the single-user-case READ lock,
			// but previously released the WRITE lock.
			skCase.releaseSingleUserCaseReadLock();
		}
		return null;
	}

	/**
	 * Determine if the given TagSet contains TagNames that are currently in
	 * use, ie there is an existing ContentTag or ArtifactTag that uses TagName.
	 *
	 * @param tagSet The TagSet to check.
	 *
	 * @return Return true if the TagSet is in use.
	 *
	 * @throws TskCoreException
	 */
	private boolean isTagSetInUse(TagSet tagSet) throws TskCoreException {
		skCase.acquireSingleUserCaseReadLock();
		try (CaseDbConnection connection = skCase.getConnection()) {
			List<TagName> tagNameList = tagSet.getTagNames();
			if (tagNameList != null && !tagNameList.isEmpty()) {
				String statement = String.format("SELECT tag_id FROM content_tags WHERE tag_name_id IN (SELECT tag_name_id FROM tag_names WHERE tag_set_id = %d)", tagSet.getId());
				try (Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(statement)) {
					if (resultSet.next()) {
						return true;
					}
				} catch (SQLException ex) {
					throw new TskCoreException(String.format("Failed to determine if TagSet is in use (%s)", tagSet.getId()), ex);
				}
				statement = String.format("SELECT tag_id FROM blackboard_artifact_tags WHERE tag_name_id IN (SELECT tag_name_id FROM tag_names WHERE tag_set_id = %d)", tagSet.getId());
				try (Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(statement)) {
					if (resultSet.next()) {
						return true;
					}
				} catch (SQLException ex) {
					throw new TskCoreException(String.format("Failed to determine if TagSet is in use (%s)", tagSet.getId()), ex);
				}
			}
		} finally {
			skCase.releaseSingleUserCaseReadLock();
		}
		return false;
	}

	/**
	 * Returns a list of all of the TagNames that are a part of the given
	 * TagSet.
	 *
	 * @param tagSetId ID of a TagSet.
	 *
	 * @return List of TagNames for the TagSet or empty list if none were found.
	 *
	 * @throws TskCoreException
	 */
	private List<TagName> getTagNamesByTagSetID(int tagSetId) throws TskCoreException {
		if (tagSetId <= 0) {
			throw new IllegalArgumentException("Invalid tagSetID passed to getTagNameByTagSetID");
		}
		List<TagName> tagNameList = new ArrayList<>();
		skCase.acquireSingleUserCaseReadLock();
		String query = String.format("SELECT * FROM tag_names WHERE tag_set_id = %d", tagSetId);
		try (CaseDbConnection connection = skCase.getConnection(); Statement stmt = connection.createStatement(); ResultSet resultSet = stmt.executeQuery(query)) {
			while (resultSet.next()) {
				tagNameList.add(new TagName(resultSet.getLong("tag_name_id"),
						resultSet.getString("display_name"),
						resultSet.getString("description"),
						TagName.HTML_COLOR.getColorByName(resultSet.getString("color")),
						TskData.FileKnown.valueOf(resultSet.getByte("knownStatus")),
						tagSetId,
						resultSet.getInt("rank")));
			}
		} catch (SQLException ex) {
			throw new TskCoreException(String.format("Error getting tag names for tag set (%d)", tagSetId), ex);
		} finally {
			skCase.releaseSingleUserCaseReadLock();
		}
		return tagNameList;
	}

	/**
	 * Object to store the tag change from a call to addArtifactTag.
	 */
	public static class BlackboardArtifactTagChange {

		private final BlackboardArtifactTag addedTag;
		private final List<BlackboardArtifactTag> removedTagList;

		/**
		 * Construct a new artifact tag change object.
		 *
		 * @param added   Newly created artifact tag.
		 * @param removed List of removed tags.
		 */
		BlackboardArtifactTagChange(BlackboardArtifactTag added, List<BlackboardArtifactTag> removed) {
			this.addedTag = added;
			this.removedTagList = removed;
		}

		/**
		 * Returns the newly created tag.
		 *
		 * @return Added artifact tag.
		 */
		public BlackboardArtifactTag getAddedTag() {
			return addedTag;
		}

		/**
		 * Returns a list of the artifact tags that were removed.
		 *
		 * @return Unmodifiable list of removed tags.
		 */
		public List<BlackboardArtifactTag> getRemovedTags() {
			return Collections.unmodifiableList(removedTagList);
		}
	}

	/**
	 * Object to store the tag change from a call to addContentTag.
	 */
	public static class ContentTagChange {

		private final ContentTag addedTag;
		private final List<ContentTag> removedTagList;

		/**
		 * Construct a new content tag change object.
		 *
		 * @param added   Newly created content tag.
		 * @param removed List of removed tags.
		 */
		ContentTagChange(ContentTag added, List<ContentTag> removed) {
			this.addedTag = added;
			this.removedTagList = removed;
		}

		/**
		 * Returns the newly created tag.
		 *
		 * @return Added content tag.
		 */
		public ContentTag getAddedTag() {
			return addedTag;
		}

		/**
		 * Returns a list of the content tags that were removed.
		 *
		 * @return Unmodifiable list of removed tags.
		 */
		public List<ContentTag> getRemovedTags() {
			return Collections.unmodifiableList(removedTagList);
		}
	}
}
/*
* Sleuth Kit Data Model
*
* Copyright 2017 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.datamodel;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Time related utility methods
*
*/
/**
 * Time related utility methods.
 */
public class TimeUtilities {

	private static final Logger LOGGER = Logger.getLogger(TimeUtilities.class.getName());

	/*
	 * SimpleDateFormat is not thread-safe, so all use of this shared instance
	 * is synchronized on it (see epochToTime(long, TimeZone)).
	 */
	private static final SimpleDateFormat DATE_FORMATTER = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");

	private TimeUtilities() {
		// Utility class: static methods only, no instances.
	}

	/**
	 * Return the epoch as a string in "yyyy-MM-dd HH:mm:ss z" format, using
	 * the default time zone.
	 *
	 * @param epoch time in seconds
	 *
	 * @return formatted date time string, or "0000-00-00 00:00:00" when epoch
	 *         is 0
	 */
	public static String epochToTime(long epoch) {
		String time = "0000-00-00 00:00:00";
		if (epoch != 0) {
			// A fresh formatter is created here (rather than reusing
			// DATE_FORMATTER) so the default time zone always applies,
			// regardless of any zone previously set on the shared formatter.
			time = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss z").format(new java.util.Date(epoch * 1000));
		}
		return time;
	}

	/**
	 * Return the epoch as a string in "yyyy-MM-dd HH:mm:ss z" format, in the
	 * given time zone.
	 *
	 * @param epoch time in seconds
	 * @param tzone time zone
	 *
	 * @return formatted date time string, or "0000-00-00 00:00:00" when epoch
	 *         is 0
	 */
	public static String epochToTime(long epoch, TimeZone tzone) {
		String time = "0000-00-00 00:00:00";
		if (epoch != 0) {
			synchronized (DATE_FORMATTER) {
				DATE_FORMATTER.setTimeZone(tzone);
				time = DATE_FORMATTER.format(new java.util.Date(epoch * 1000));
			}
		}
		return time;
	}

	/**
	 * Return the epoch as a string in "yyyy-MM-dd'T'HH:mm:ss'Z'" format, in
	 * the given time zone.
	 *
	 * @param epoch time in seconds
	 * @param tzone time zone
	 *
	 * @return formatted date time string
	 */
	public static String epochToTimeISO8601(long epoch, TimeZone tzone) {
		SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
		formatter.setTimeZone(tzone);
		// Bug fix: epoch is documented as seconds, but Date expects
		// milliseconds; the value was previously passed through unscaled,
		// unlike the other epochToTime methods in this class.
		return formatter.format(new Date(epoch * 1000));
	}

	/**
	 * Convert from a "yyyy-MM-dd HH:mm:ss" formatted date time string (in the
	 * default time zone) to epoch time in seconds.
	 *
	 * @param time formatted date time string as "yyyy-MM-dd HH:mm:ss"
	 *
	 * @return epoch time in seconds, or 0 if the string cannot be parsed
	 */
	public static long timeToEpoch(String time) {
		long epoch = 0;
		try {
			epoch = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(time).getTime() / 1000;
		} catch (Exception e) {
			// Deliberately best-effort: a bad string logs a warning and
			// returns 0 rather than throwing.
			LOGGER.log(Level.WARNING, "Failed to parse time string", e); //NON-NLS
		}
		return epoch;
	}
}