diff --git a/exercise-session-1.ipynb b/exercise-session-1.ipynb
index 36b938ab0c9d3dba9a1982b0c4b34ef7c3093fd5..67e896dbaeaf7af4da96752cc1d7cc35265c2a3b 100644
--- a/exercise-session-1.ipynb
+++ b/exercise-session-1.ipynb
@@ -25,10 +25,7 @@
 "import seaborn as sns\n",
 "sns.set()\n",
 "rng = np.random.default_rng(13)\n",
- "import jax\n",
- "import jax.numpy as jnp\n",
 "import tqdm\n",
- "import scipy.stats as stats\n",
 "from src.sim import generate_data\n",
 "from src.trajectories import get_ex1_trajectories\n",
 "ex1_trajectories = get_ex1_trajectories()\n",
@@ -37,7 +34,8 @@
 "import src.associators as associators\n",
 "import src.trackers as trackers\n",
 "import src.plotters as plotters\n",
- "import src.models as models"
+ "import src.models as models\n",
+ "import src.logic as logic"
 ]
 },
 {
@@ -112,7 +110,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "motion_model = models.cv_model(Q=np.identity(2), D=2, T=1)\n",
+ "motion_model = models.cv_model(Q=np.identity(2)*10, D=2, T=1)\n",
 "# Setup filter\n",
 "filt = filters.EKF(motion_model, sensor_model)\n",
 "# Setup tracker\n",
@@ -179,7 +177,7 @@
 "source": [
 "### Task 2.3 - IMM for Maneuvering Targets\n",
 "\n",
- "A two-model IMM was implemented with two CV models with process noise tuning $\\mathbf{Q}_1=\\mathbf{I}$ and $\\mathbf{Q}_2=100\\mathbf{I}$, respectively. Mahalanobis gating was used with $\\gamma=15$. Further, a nearest neighbour association step was used for associating the gated measurements. \n",
+ "A two-model IMM was implemented, combining two CV models with process noise tunings $\\mathbf{Q}_1=\\mathbf{I}$ and $\\mathbf{Q}_2=100\\mathbf{I}$, respectively. Mahalanobis gating was used with $\\gamma=12.43$. Further, nearest neighbour association was used for the gated measurements.\n",
 "The transition probability matrix was set to\n",
 "$$\\Pi = \\begin{bmatrix}p & 1-p\\\\1-p & p\\end{bmatrix},$$\n",
- "with $p=0.9$, which yielded good performance.\n",
+ "with $p=0.95$, which yielded good performance.\n",
@@ -196,11 +194,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "motion_model = models.cv_model(Q=np.identity(2), D=2, T=1) # Use Q=I\n",
+ "motion_model = models.cv_model(Q=1*np.identity(2), D=2, T=1) # Use Q=I\n",
 "motion_model_two = models.cv_model(Q=100*np.identity(2), D=2, T=1) # Use Q=100I\n",
 "filtone = filters.EKF(motion_model, sensor_model)\n",
 "filttwo = filters.EKF(motion_model_two, sensor_model)\n",
- "p = 0.9\n",
+ "p = 0.95\n",
 "trans_prob = np.array([[p, 1-p], [1-p, p]])\n",
 "imm = filters.IMM([filtone, filttwo], sensor_model, trans_prob)\n",
 "gater.gamma = 12.43 # Corresponds to alpha=0.998. Tuned for performance."
@@ -466,7 +464,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "import src.logic as logic\n",
 "F = np.array([[1, 1], [0, 1]])\n",
 "Q1 = 0.001*np.array([[1/4, 1/2], [1/2, 1]])\n",
 "R = np.array([[0.01]])\n",
diff --git a/exercise-session-2.ipynb b/exercise-session-2.ipynb
index 43c03e3e4aa79503680047325fffa87812375781..130d095b35a5e3fc39e84f97aab8e9e84394592c 100644
--- a/exercise-session-2.ipynb
+++ b/exercise-session-2.ipynb
@@ -67,7 +67,7 @@
 "The filter was chosen as an EKF for simplicity (and it seemed to work fine).\n",
 "\n",
 "##### **Initialization**\n",
- "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. The initial uncertainty was set to $\\mathbf{P}_0=\\mathsc{diag}[10, 10, 100, 100]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$."
+ "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. The initial uncertainty was set to $\\mathbf{P}_0=\\mathrm{diag}[10, 10, 100, 100]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$." ] }, { diff --git a/exercise-session-3.ipynb b/exercise-session-3.ipynb index 21525f961a3044cb55014a99b0b78468c4b7956d..e299ebc02c2c046e23140a22510e982138d6ec25 100644 --- a/exercise-session-3.ipynb +++ b/exercise-session-3.ipynb @@ -13,7 +13,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "3e6fa80c-07ba-4112-a0d3-2b3aec744e1d", "metadata": {}, "outputs": [], @@ -23,9 +23,6 @@ "import seaborn as sns\n", "sns.set()\n", "rng = np.random.default_rng(13)\n", - "import tqdm.notebook as tqdm\n", - "import copy\n", - "import scipy.stats as stats\n", "from src.sim import generate_data\n", "from src.trajectories import get_ex2_trajectories\n", "import src.gaters as gaters\n", @@ -36,8 +33,19 @@ "import src.models as models\n", "import src.logic as logic\n", "import src.utility as util\n", - "from src.utility import murty\n", - "# from src.utility import match_tracks_to_ground_truth, recreate_trajectories, murty, load_result, save_resu\n", + "\n", + "# For convenience later on (loads previous tracker results from a file to avoid running the GNN/JPDA again)\n", + "def get_tracker_result(filename, tracker):\n", + " try:\n", + " result = util.load_result(filename)\n", + " except:\n", + " tracks, confirmed_tracks = tracker.evaluate(Y)\n", + " matches = util.match_tracks_to_ground_truth(confirmed_tracks, filtered_trajs)\n", + " result = dict(matches=matches, \n", + " tracks=tracks, \n", + " confirmed_tracks=confirmed_tracks,\n", + " Y=Y)\n", + " return result\n", "# Get the necessary trajectories\n", "trajectories = get_ex2_trajectories()\n", "trajs = ['T1', 'T3', 'T5', 'T6'] # Select the trajectories to use\n", @@ -51,7 +59,7 @@ "source": [ "### Task 3.1 - HOMHT\n", "\n", - "A HOMHT tracker was implemented, see src.trackers. The model setup is as follows\n", + "An HOMHT tracker was implemented, see src.trackers. The model setup is as follows\n", "\n", "##### **Sensor Model**\n", "The sensor model is the standard distance and bearing radar with $\\mathbf{R}=\\mathrm{diag}[10, 0.001]^2$. The probability of detection is set to $P_D=0.9$.\n", @@ -72,236 +80,20 @@ "The filter was chosen as an EKF for simplicity (and it seemed to work fine).\n", "\n", "##### **Initialization**\n", - "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. The initial uncertainty was set to $\\mathbf{P}_0=\\mathsc{diag}[10, 10, 100, 100]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "bb81be9c-d47c-4dc2-8514-fa7727d67b4e", - "metadata": {}, - "outputs": [], - "source": [ - "class MHT():\n", - " def __init__(self, logic, logic_params, init_track, filt, gater, clutter_model, pthresh=0.95, Nh=30):\n", - " \"\"\"An implementation of a Hypothesis Oriented Multiple Hypothesis Tracker.\n", - "\n", - " Parameters\n", - " ----------\n", - " logic : logic\n", - " See src.logic. Some sort of track logic.\n", - " logic_params : dict\n", - " Contains parameters to the track logic.\n", - " init_track : callable\n", - " A function that initiates a track. 
Should take a measurement, the\n", - " time, an id and the filter to use for the track as input.\n", - " filt : filter\n", - " See src.filter. Some sort of filter to use for the tracks.\n", - " gater : gater\n", - " See src.gater. A gating function.\n", - " clutter_model : dict\n", - " A dict containing the clutter model.\n", - " pthresh : float [0, 1\n", - " Threshold for the amount of probability \"mass\" to keep each timestep. Prunes unlikely hypothesis until this threshold is reached\n", - " Nh : int\n", - " Maximum allowed number of hypothesis after each time step\n", - "\n", - " \"\"\"\n", - " self.logic = logic\n", - " self.logic_params = logic_params\n", - " self.init_track = init_track\n", - " self.filt = filt\n", - " self.gater = gater\n", - " self.clutter_model = clutter_model\n", - " self.pthresh = pthresh\n", - " self.Nh = Nh\n", - " \n", - " def get_association_cost(self, meas, tracks, validation_matrix):\n", - " ny = meas.shape[1]\n", - " Nc = len(tracks) # Number of tracks to associate\n", - " association_matrix = -np.inf*np.ones((ny, Nc+ny))\n", - " likelihood_matrix = np.zeros((ny, Nc+ny))\n", - " # Entry for false alarms\n", - " np.fill_diagonal(association_matrix[:, Nc:Nc+ny], np.log(self.logic_params['Bfa']))\n", - " np.fill_diagonal(likelihood_matrix[:, Nc:Nc+ny], (1-self.logic_params['PD']*self.logic_params['PG'])*self.logic_params['Bfa'])\n", - "\n", - " if tracks:\n", - " # All of the tracks are assumed to use the same sensor model!\n", - " x = np.vstack([track['x'][-1] for track in tracks]).T\n", - " yhat_t = tracks[0]['filt'].sensor_model['h'](x) # Returns a (ny x nx) matrix\n", - " H_t = tracks[0]['filt'].sensor_model['dhdx'](x) # Returns a (ny x nC x nx x nC) tensor\n", - " for ti, track in enumerate(tracks): # Iterate over confirmed tracks\n", - " # Entry for validated tracks\n", - " val_meas = meas[:, validation_matrix[:, ti]] # Get the validated measurements for this track\n", - " yhat = yhat_t[:, ti]\n", - " H = H_t[:, ti, :, ti]\n", - " py = stats.multivariate_normal.pdf(val_meas.squeeze().T, mean=yhat.flatten(), cov=H@track['P'][-1]@H.T+track['filt'].sensor_model['R'])\n", - " association_matrix[validation_matrix[:, ti], ti] = np.log(track['filt'].sensor_model['PD']*py/(1-track['filt'].sensor_model['PD'])) # PG assumed = 1\n", - " likelihood_matrix[validation_matrix[:, ti], ti] = track['filt'].sensor_model['PD']*py\n", - " return association_matrix, likelihood_matrix\n", - "\n", - " def get_validation_matrix(self, meas, tracks):\n", - " ny = meas.shape[1]\n", - " Nc = len(tracks) # Number of tracks to associate\n", - " validation_matrix = np.zeros((ny, Nc), dtype=bool)\n", - " for ti, track in enumerate(tracks): # Iterate over confirmed tracks\n", - " validation_matrix[:, ti] = self.gater.gate(track['x'][-1], track['P'][-1], meas)\n", - " return validation_matrix\n", - " \n", - " def _update_track(self, meas, track):\n", - " \"\"\"Handles the update of a certain track with the given measurement(s).\n", - "\n", - " Modifies the track in-place!\n", - "\n", - " Parameters\n", - " ----------\n", - " meas : numpy.ndarray\n", - " Contains measurement(s) to update a specific track with. 
ny by N,\n", - " where N is the number of measurements to update the track with.\n", - " track : dict\n", - " A dict containing everything relevant to the track.\n", - "\n", - " \"\"\"\n", - " if meas.size == 0:\n", - " track = self.logic(np.array([]), track['filt'], track, self.logic_params) # If no meas associated, still update logic of track\n", - " return\n", - " # Calculate prediction error of each measurement\n", - " yhat = track['filt'].sensor_model['h'](track['x'][-1])\n", - " \n", - " eps = meas-yhat\n", - " track = self.logic(meas, track['filt'], track, self.logic_params)\n", - " \n", - " # Update\n", - " track['x'][-1], track['P'][-1] = track['filt'].update(track['x'][-1], track['P'][-1], eps)\n", - "\n", - " def evaluate(self, Y):\n", - " \"\"\" Evaluates the detections in Y.\n", - "\n", - " Parameters\n", - " ----------\n", - " Y : list\n", - " List of detections at time k=0 to K where K is the length of Y.\n", - " Each entry of Y is ny by N_k where N_k is time-varying as the number\n", - " of detections vary.\n", - "\n", - " Returns\n", - " -------\n", - " list, list\n", - " First list contains all initiated tracks, both tentative, deleted\n", - " and confirmed. The second list contains only the confirmed list,\n", - " even if they have died. Hence, the lists contain duplicates (but\n", - " point to the same object!).\n", - "\n", - " \"\"\"\n", - " rng = np.random.default_rng()\n", - " hypothesis = dict() # This will contain hypothesis over time\n", - " init_hypothesis = lambda probability: dict(tracks=[], probability=probability) # For more readable code in the end\n", - " hypothesis[-1] = [init_hypothesis(1)]\n", - "\n", - " ids = 0\n", - " for k, meas_k in tqdm.tqdm(enumerate(Y), desc=\"HOMHT evaluating detections: \", total=len(Y)):\n", - " hypothesis[k] = []\n", - " # For each hypothesis from the last time step\n", - " for hyp in hypothesis[k-1]:\n", - " # Propagate each track to this time step\n", - " for track in hyp['tracks']:\n", - " if track['stage'] != 'deleted':\n", - " x, P = track['filt'].propagate(track['x'][-1], track['P'][-1])\n", - " track['x'].append(x)\n", - " track['P'].append(P)\n", - " track['t'].append(k)\n", - "\n", - " unused_meas = np.ones((meas_k.shape[1],), dtype=bool)\n", - " # For all \"live\" tracks in hypothesis\n", - " live_tracks = [track for track in hyp['tracks'] if track['stage'] in ['confirmed', 'tentative']]\n", - " validation_matrix = self.get_validation_matrix(meas_k[:, unused_meas],\n", - " live_tracks)\n", - " association_matrix, likelihood_matrix = self.get_association_cost(meas_k, \n", - " live_tracks, \n", - " validation_matrix)\n", - " ny = meas_k.shape[1]\n", - " nt = len(live_tracks)\n", - " # The Murty alg. 
returns the cost of the association and the indices of the association\n", - " for (cost, associations) in murty(-association_matrix):\n", - " unused_meas = np.ones((ny,), dtype=bool)\n", - " \n", - " # Initiate a new hypothesis and copy over all the tracks in the current hypothesis.\n", - " hypothesis[k].append(init_hypothesis(hyp['probability']))\n", - " for track in hyp['tracks']:\n", - " if track['stage'] != 'deleted':\n", - " tmp_track = init_track(np.ones((2, 1)), k, track['identity'], track['filt'])\n", - " tmp_track['x'][-1] = track['x'][-1]\n", - " tmp_track['P'][-1] = track['P'][-1]\n", - " tmp_track['t'] = track['t']\n", - " tmp_track['associations'] = copy.copy(track['associations'])\n", - " tmp_track['stage'] = track['stage']\n", - " tmp_track['Lt'] = track['Lt']\n", - " hypothesis[k][-1]['tracks'].append(tmp_track)\n", - "# hypothesis[k].append(copy.deepcopy(hyp)) # the easy but INCREDIBLY slow way to do it\n", - "\n", - " # Update the tracks with associated measurements\n", - " for j, association in enumerate(associations):\n", - " if association < nt: # i.e. associated to a track\n", - " self._update_track(meas_k[:, j], hypothesis[k][-1]['tracks'][association])\n", - " hypothesis[k][-1]['tracks'][association]['associations'].append(k)\n", - " unused_meas[j] = 0\n", - " # Update the proability of the hypothesis for this particular association\n", - " hypothesis[k][-1]['probability'] *= likelihood_matrix[j, association]\n", - " # Update tracks without an association in this particular hypothesis\n", - " for j in range(nt):\n", - " if j not in associations:\n", - " self._update_track(np.array([]), hypothesis[k][-1]['tracks'][j])\n", - " \n", - " # For any still unused measurements, possibly initiate a new track\n", - " while unused_meas.any():\n", - " # Select an unused measurement at random and initiate a track\n", - " ind = rng.choice(np.arange(unused_meas.size), p=unused_meas/unused_meas.sum())\n", - " track = init_track(meas_k[:, ind], k, ids, filt) # Initialize track\n", - " hypothesis[k][-1]['tracks'].append(track)\n", - " unused_meas[ind] = 0 # Remove measurement from association hypothesis\n", - " validation_matrix = self.get_validation_matrix(meas_k[:, unused_meas],\n", - " [hypothesis[k][-1]['tracks'][-1]])\n", - " ids += 1\n", - " # Remove any gated measurements from further consideration\n", - " if validation_matrix.any():\n", - " unused_meas[(meas_k[:, unused_meas][:, validation_matrix.flatten()]==meas_k).all(axis=0)] = 0 \n", - " \n", - " # Normalize the hypothesis probabilities\n", - " total_score = np.sum([hyp['probability'] for hyp in hypothesis[k]])\n", - " for hyp in hypothesis[k]:\n", - " hyp['probability'] /= total_score\n", - " # Only keep the hypothesis which amount to pthresh probability \"mass\"\n", - " hypothesis[k].sort(key=lambda x: x['probability'], reverse=True)\n", - " prob = [hyp['probability'] for hyp in hypothesis[k]]\n", - " ind = np.argmax(np.cumsum(prob)>self.pthresh)+1 # Find the index to keep\n", - " hypothesis[k] = hypothesis[k][:ind]\n", - " # Only allow a certain number of hypothesis\n", - " if len(hypothesis[k]) > self.Nh:\n", - " hypothesis[k] = hypothesis[k][:self.Nh]\n", - " # Re-normalize the hypothesis probabilities\n", - " total_score = np.sum([hyp['probability'] for hyp in hypothesis[k]])\n", - " for hyp in hypothesis[k]:\n", - " hyp['probability'] /= total_score\n", - " return hypothesis\n" + "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. 
The initial uncertainty was set to $\\mathbf{P}_0=\\mathrm{diag}[10, 10, 100, 100]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$.\n",
+ "\n",
+ "##### **Tracker**\n",
+ "The probability threshold was set to 0.9 and the maximum number of hypotheses to 5. "
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
 "id": "ad8aeebb",
 "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "R = np.diag([10, 0.001])**2\n",
- "# R = np.diag([0, 0])\n",
 "PD = 0.9\n",
 "lam = 2\n",
 "volume = dict(xmin=0, xmax=2500, ymin=0, ymax=2000)\n",
@@ -339,42 +131,15 @@
 },
 {
 "cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
 "id": "7bd04719-2e24-4f2f-bed4-9930fa8993d5",
 "metadata": {},
- "outputs": [
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "f1caa747a1c64fa9be8913cbbb8de9e2",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "HOMHT evaluating detections: 0%| | 0/30 [00:00<?, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
+ "outputs": [],
 "source": [
- "# %%prun -s cumulative -q -l 30 -T prun1\n",
 "pthresh = 0.9\n",
 "Nh = 5\n",
- "mht = MHT(logic.score_logic, logic_params, init_track, filt, gater, clutter_model, pthresh, Nh)\n",
- "hypothesis = mht.evaluate(Y[:30])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "3652a059-9eda-402b-ae08-8d78fe92caf2",
- "metadata": {},
- "outputs": [],
- "source": [
- "marg_tracks, marg_confirmed_tracks = recreate_trajectories(hypothesis, True)\n",
- "map_tracks, map_confirmed_tracks = recreate_trajectories(hypothesis, False)"
+ "mht = trackers.MHT(logic.score_logic, logic_params, init_track, filt, gater, clutter_model, pthresh, Nh)\n",
+ "hypothesis = mht.evaluate(Y)"
 ]
 },
 {
 "id": "30f60524-b6cb-48c6-bbee-c69d8f72bdf2",
 "metadata": {},
 "source": [
- "#### Apply GNN and JPDA (or load previous result)"
+ "#### Apply GNN and JPDA\n",
+ "\n",
+ "Tries to load results from exercise-session-2 if that notebook has been run previously; otherwise runs the GNN and JPDA on the simulated data."
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
 "id": "31e8ca55-8bb0-4662-977a-857d8cdf4ea2",
 "metadata": {},
 "outputs": [],
 "source": [
- "def get_tracker_result(filename, tracker):\n",
- "    try:\n",
- "        result = util.load_result(filename)\n",
- "    except:\n",
- "        tracks, confirmed_tracks = tracker.evaluate(Y)\n",
- "        matches = util.match_tracks_to_ground_truth(confirmed_tracks, filtered_trajs)\n",
- "        result = dict(matches=matches, \n",
- "                      tracks=tracks, \n",
- "                      confirmed_tracks=confirmed_tracks,\n",
- "                      Y=Y)\n",
- "    return result\n",
- "\n",
 "jpda = trackers.JPDA(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n",
 "gnn = trackers.GNN(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n",
 "jpda_result = get_tracker_result('jpda_result_sim', jpda)\n",
 "gnn_result = get_tracker_result('gnn_result_sim', gnn)"
@@ -422,16 +177,19 @@
 "- The initial point of each track is matched to the closest point of each true trajectory.\n",
 "- The RMSE to each track is then calculated and the lowest RMSE is chosen as the true trajectory for this track.\n",
 "\n",
- "Limitations: several tracks can match a specific true trajectory. (Hopefully not an issue, at least not here)."
+ "Limitations: several tracks can match a specific true trajectory. (Hopefully not an issue, at least not here).\n", + "\n", + "First recreates target trajectories from the MHT output. It can recreate trajectories marginalized over all hypotheses or the MAP estimate." ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "0c29b3fb-55ac-4c19-9acb-b493e536bd8d", "metadata": {}, "outputs": [], "source": [ + "map_tracks, map_confirmed_tracks = util.recreate_trajectories(hypothesis, marginalize=False)\n", "mht_matches = util.match_tracks_to_ground_truth(map_confirmed_tracks, filtered_trajs)\n", "mht_result = dict(matches=mht_matches,\n", " tracks=map_tracks,\n", @@ -461,11 +219,14 @@ "outputs": [], "source": [ "gnnfig = plotters.plot_result_ex2_2(gnn_result, filtered_trajs)\n", + "gnnfig.set_size_inches(16, 8)\n", "plt.suptitle('GNN', fontsize=20)\n", "jpdafig = plotters.plot_result_ex2_2(jpda_result, filtered_trajs)\n", + "jpdafig.set_size_inches(16, 8)\n", "plt.suptitle('JPDA', fontsize=20)\n", - "# mhtfig = plotters.plot_result_ex2_2(mht_result, filtered_trajs)\n", - "# plt.suptitle('MHT', fontsize=20)\n", + "mhtfig = plotters.plot_result_ex2_2(mht_result, filtered_trajs)\n", + "mhtfig.set_size_inches(16, 8)\n", + "plt.suptitle('MHT', fontsize=20)\n", "plt.show()" ] }, @@ -475,7 +236,7 @@ "metadata": {}, "source": [ "#### Comments\n", - "Both the GNN and JPDA capture all four tracks good. The GNN has a slightly lower RMSE overall which seems reasonable given the \"low\" clutter rate and only a few track cross-overs." + "The MHT performs more or less equally well as the GNN and JPDA. There is not much of a difference in this particular scenario. However, the computational time is quite a lot longer for the MHT, so there is no reason to use the MHT in this particular scenario. The MHT also has one false track at roughly time 60, but quickly discards it." ] }, { @@ -485,7 +246,7 @@ "source": [ "### Task 2.4 - Mysterious Data\n", "\n", - "A GNN and a JPDA tracker were applied to the mysterious data set. The design choices are listed below.\n", + "An MHT was applied to the mysterious data set. The design choices are listed below (identical to the JPDA and GNN from exercise-session-2).\n", "\n", "##### **Sensor Model**\n", "The sensor model is the standard distance and bearing radar as before with the same noise parameters. The probability of detection was set to $P_D=0.9$.\n", @@ -506,7 +267,10 @@ "The filter was chosen as an EKF for simplicity (and it seemed to work fine).\n", "\n", "##### **Initialization**\n", - "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. The initial uncertainty was set to $\\mathbf{P}_0=\\mathrm{diag}[100, 100, 1000, 1000]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$.\n" + "The tracks were initialized at the measurement (converted to the positional domain) with a $0$ velocity. The initial uncertainty was set to $\\mathbf{P}_0=\\mathrm{diag}[100, 100, 1000, 1000]$ to account for the unknown initial velocity. The track score is initially set to $L_t=0$.\n", + "\n", + "##### **Tracker**\n", + "The probability threshold was set to 0.8 and the maximum number of hypothesis to 5. At any given time, there are no more than 4 hypothesis active, which yielded decent results." 
] }, { @@ -521,7 +285,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "e8add90a-bb80-446d-9380-74d26ee43661", "metadata": {}, "outputs": [], @@ -543,7 +307,10 @@ "logic_params['lam'] = 0.95\n", "logic_params['Bnt'] = Bfa\n", "\n", - "filt = filters.EKF(motion_model, sensor_model)" + "filt = filters.EKF(motion_model, sensor_model)\n", + "# Tracker specific\n", + "pthresh = 0.8\n", + "Nh = 5" ] }, { @@ -558,48 +325,39 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "id": "d955d2bc-9ee6-4ca2-bd0f-4ddf9c88ff68", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "ba73f29d7071420e8d7450fb930026f7", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "HOMHT evaluating detections: 0%| | 0/190 [00:00<?, ?it/s]" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ - "pthresh = 0.8\n", - "Nh = 5\n", - "mht = MHT(logic.score_logic, logic_params, init_track, filt, gater, clutter_model, pthresh, Nh)\n", - "hypothesis_24 = mht.evaluate(dat['Y'][:-2])" + "mht = trackers.MHT(logic.score_logic, logic_params, init_track, filt, gater, clutter_model, pthresh, Nh)\n", + "hypothesis_24 = mht.evaluate(dat['Y'])" + ] + }, + { + "cell_type": "markdown", + "id": "340baafe-51f8-4a34-87ba-a1abb25bd014", + "metadata": {}, + "source": [ + "#### Trajectory recreation and GNN/JPDA\n", + "Recreates the MHT trajectories from the hypothesis and also applies the JPDA and GNN to the same problem if they have not been applied before." ] }, { "cell_type": "code", - "execution_count": 32, - "id": "1f235f46-3b7b-4a43-a4a5-cdf2cb4a987f", + "execution_count": null, + "id": "625c7efa-6412-41dd-92dc-76b12d29d8a7", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 5, 5, 4, 4, 4, 5, 5, 4, 4, 4, 4, 4, 4, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]\n" - ] - } - ], + "outputs": [], "source": [ - "print([len(hyp) for hyp in hypothesis_24.values()])#hyp['probability'] for hyp in hypothesis_24[10]]" + "mht_tracks, mht_confirmed_tracks = util.recreate_trajectories(hypothesis_24, False)\n", + "mht_result = dict(Y=dat['Y'],\n", + " tracks=list(mht_tracks),\n", + " confirmed_tracks=list(mht_confirmed_tracks))\n", + "jpda = trackers.JPDA(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n", + "gnn = trackers.GNN(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n", + "jpda_result = get_tracker_result('jpda_result_myst', jpda)\n", + "gnn_result = get_tracker_result('gnn_result_myst', gnn)" ] }, { @@ -617,25 +375,19 @@ { "cell_type": "code", "execution_count": null, - "id": "059a5880-31fe-40ba-acf7-755e1b002ec5", + "id": "49a66f85-e94d-4ef7-a763-3d0846e0b293", "metadata": {}, "outputs": [], "source": [ - "mht_tracks, mht_confirmed_tracks = util.recreate_trajectories(hypothesis_24, False)\n", - "mht_result = dict(Y=dat['Y'],\n", - " tracks=list(mht_tracks),\n", - " 
confirmed_tracks=list(mht_confirmed_tracks))\n", - "jpda = trackers.JPDA(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n", - "gnn = trackers.GNN(logic.score_logic, logic_params, init_track, filt, gater, clutter_model)\n", - "jpda_result = get_tracker_result('jpda_result_myst', jpda)\n", - "gnn_result = get_tracker_result('gnn_result_myst', gnn)\n", - "\n", - "plotters.plot_result_ex2_24(gnn_result)\n", + "gnnfig = plotters.plot_result_ex2_24(gnn_result)\n", "plt.suptitle('GNN', fontsize=20)\n", - "plotters.plot_result_ex2_24(jpda_result)\n", + "gnnfig.set_size_inches(16, 8)\n", + "jpdafig = plotters.plot_result_ex2_24(jpda_result)\n", "plt.suptitle('JPDA', fontsize=20)\n", - "plotters.plot_result_ex2_24(mht_result)\n", + "jpdafig.set_size_inches(16, 8)\n", + "mhtfig = plotters.plot_result_ex2_24(mht_result)\n", "plt.suptitle('MHT', fontsize=20)\n", + "mhtfig.set_size_inches(16, 8)\n", "plt.show()" ] }, @@ -645,7 +397,7 @@ "metadata": {}, "source": [ "#### Comments\n", - "Both the GNN and JPDA manage to keep both tracks the entire time. The GNN results in two \"U\"-shaped tracks whereas the JPDA results in \"S\"-shaped tracks. Without more information, it is impossible to say which is correct. However, the JPDA tracker results in a smoother trajectory, probably because of the soft measurement assignments." + "The MHT seems almost like a mix of the GNN and JPDA in this scenario. The tracks are more or less identical to the JPDA, but the trajectories themselves (i.e. the actual positional estimates) look very similar to the GNN. This is not surprising given that the associations are \"hard\" rather than soft." ] } ],
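
For reference, the hypothesis management that the `pthresh` and `Nh` settings above control reduces to: sort the hypotheses by probability, keep the most probable ones until `pthresh` of the probability "mass" is covered, cap the count at `Nh`, and renormalize. Below is a minimal standalone sketch of that pruning step, mirroring the logic in the `MHT.evaluate` cell removed from exercise-session-3; the `dict(probability=...)` hypothesis structure matches the one used there, and the example numbers are illustrative only.

```python
import numpy as np

def prune_hypotheses(hyps, pthresh=0.9, Nh=5):
    # Sort by descending probability, as in the removed MHT.evaluate loop
    hyps = sorted(hyps, key=lambda h: h['probability'], reverse=True)
    # Keep hypotheses until pthresh of the probability "mass" is covered...
    csum = np.cumsum([h['probability'] for h in hyps])
    ind = int(np.argmax(csum > pthresh)) + 1
    # ...but never more than Nh of them
    hyps = hyps[:min(ind, Nh)]
    # Renormalize so the kept hypotheses sum to one again
    total = sum(h['probability'] for h in hyps)
    for h in hyps:
        h['probability'] /= total
    return hyps

# Illustrative example: five hypotheses with 0.95 of the mass in the first three
hyps = [dict(probability=p) for p in (0.5, 0.3, 0.15, 0.04, 0.01)]
print([round(h['probability'], 3) for h in prune_hypotheses(hyps)])
# -> [0.526, 0.316, 0.158]
```

Note that renormalizing after the cut means the kept probabilities are conditional on one of the surviving hypotheses being correct, which is why the hypothesis weights in the trackers above always sum to one at each time step.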