\begin{comment}
python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands --fname figdef3.tex
\end{comment}

\begin{comment}
ibeis ChipMatch.show_ranked_matches --qaid 79 --db PZ_MTEST --show --heatmask=True

ibeis ChipMatch.show_ranked_matches --qaid 79 --db PZ_MTEST \
    --clip-top=4 --colorbar_=False --show_aid=False --score_precision=2 --stack_larger=True --noshow_truth --draw_lbl=False --stack_side=True --show_timedelta=False \
    --dpath ~/latex/crall-thesis-2017 --save figures3/rankedmatches.jpg \
    --diskshow --saveparts --dpi=300 --figsize=14,14
\end{comment}
\newcommand{\rankedmatches}{
\begin{figure}[h]
\centering
\begin{subfigure}[h]{0.7\textwidth}\centering\includegraphics[width=\textwidth]{figures3/rankedmatchesA.jpg}\caption{}\label{sub:rankedmatchesa}\end{subfigure}
\begin{subfigure}[h]{0.7\textwidth}\centering\includegraphics[width=\textwidth]{figures3/rankedmatchesB.jpg}\caption{}\label{sub:rankedmatchesb}\end{subfigure}
\begin{subfigure}[h]{0.7\textwidth}\centering\includegraphics[width=\textwidth]{figures3/rankedmatchesC.jpg}\caption{}\label{sub:rankedmatchesc}\end{subfigure}
\captext[\caplbl{rankedmatches}Ranked matches]{
% ---
These are the top three results from the ranking algorithm.
In each row, the query annotation is on the left and the exemplars of the matched name are on the right.
Notice that the number of exemplars for each database name varies.
The top-ranked match in \cref{sub:rankedmatchesa} is correct.
The other ranks in \cref{sub:rankedmatchesb,sub:rankedmatchesc} are incorrect.
The overall matching score is shown at the top of each result.
The feature correspondences are overlaid on each result and colored by score.
% ---
}
\label{fig:rankedmatches}
\end{figure}
}

\begin{comment}
python -m ibeis.viz.viz_name --test-show_multiple_chips --db GZ_Master1 --aids 2811 2810 --show --notitle --no-inimage --dpath ~/latex/crall-candidacy-2015/ --save figures3/SceneryMatch.jpg --diskshow --clipwhite --figsize=12,6 --dpi 300

python -m ibeis.viz.viz_name --test-show_multiple_chips --db GZ_Master1 --tags SceneryMatch --index 5 --show --notitle --no-inimage

python -m ibeis.scripts.specialdraw simple_vsone_matches \
    --db GZ_Master1 --aids=2811,2810 \
    --figsize=12,6 --dpi 300 \
    --dpath ~/latex/crall-thesis-2017/ --save figures3/SceneryMatch2.jpg \
    --diskshow
\end{comment}
\newcommand{\SceneryMatch}{
\begin{figure}[ht!]
\centering
\includegraphics[width=\textwidth]{figures3/SceneryMatch2.jpg}
\captext[A scenery match]{
% ---
This is an example of two different animals appearing in front of the same distinctive background, illustrating the importance of background downweighting.
The matching regions are highlighted.
% ---
}
\label{fig:SceneryMatch}
\end{figure}
}

\begin{comment}
python -m ibeis.scripts.specialdraw featweight_fig --db PZ_MTEST --aid=1 \
    --dpath ~/latex/crall-thesis-2017/ --save figures3/fgweight.png \
    --figsize=12,3 --dpi=300 --saveparts --diskshow
\end{comment}
\newcommand{\fgweight}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.32\textwidth}\centering\includegraphics[width=\textwidth]{figures3/fgweightA.png}\caption{}\label{sub:fgweightA}\end{subfigure}
\begin{subfigure}[h]{0.32\textwidth}\centering\includegraphics[width=\textwidth]{figures3/fgweightB.png}\caption{}\label{sub:fgweightB}\end{subfigure}
\begin{subfigure}[h]{0.32\textwidth}\centering\includegraphics[width=\textwidth]{figures3/fgweightC.png}\caption{}\label{sub:fgweightC}\end{subfigure}
\captext[Foregroundness weights]{
% ---
\Cref{sub:fgweightA} shows the annotation's cropped chip.
This chip is passed to the species detector.
\Cref{sub:fgweightB} shows the intensity image output by the species detector, indicating the likelihood that each pixel belongs to the foreground.
\Cref{sub:fgweightC} shows how the weighted sum of the intensity under each feature is used as that feature's foregroundness score.
% ---
}
\label{fig:fgweight}
\end{figure}
}
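\begin{comment}
NOTE: a minimal numpy sketch of the foregroundness computation described in the
caption of fig:fgweight. Illustrative only -- the function name and the fixed
circular window are assumptions; the actual pipeline samples the intensity
image under each keypoint's own support region.

import numpy as np

def feature_foregroundness(fgmask, kpts, radius=20):
    # fgmask: (H, W) foreground intensity image in [0, 1] from the detector
    # kpts: iterable of (x, y) keypoint centers in chip coordinates
    r = radius
    ys, xs = np.mgrid[-r:r + 1, -r:r + 1]
    # Gaussian weights so pixels near the keypoint center count more
    gauss = np.exp(-(xs ** 2 + ys ** 2) / (2.0 * (r / 2.0) ** 2))
    H, W = fgmask.shape
    scores = []
    for x, y in kpts:
        x0, y0 = int(round(x)) - r, int(round(y)) - r
        # clip the window to the image bounds and the kernel to match
        xa, ya = max(x0, 0), max(y0, 0)
        xb, yb = min(x0 + 2 * r + 1, W), min(y0 + 2 * r + 1, H)
        win = fgmask[ya:yb, xa:xb]
        ker = gauss[ya - y0:yb - y0, xa - x0:xb - x0]
        # weighted average of the intensity under the feature
        scores.append(float((win * ker).sum() / ker.sum()))
    return np.array(scores)
\end{comment}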
\begin{comment}
python -m ibeis.viz.viz_nearest_descriptors --test-show_nearest_descriptors --db PZ_MTEST --qaid 3 --qfx 1062 --usetex --texknormplot --diskshow --saveparts --save figures3/knorm.jpg --dpi=300 --figsize 5 8 --dpath ~/latex/crall-thesis-2017/ --hspace .8 --top=1.0 --labelsize=12 --reshape 8 --show
\end{comment}
\newcommand{\knorm}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[width=\textwidth]{figures3/knormA.jpg}\caption{}\label{sub:knorma}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[width=\textwidth]{figures3/knormC.jpg}\caption{}\label{sub:knormb}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[width=\textwidth]{figures3/knormE.jpg}\caption{}\label{sub:knormc}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[width=\textwidth]{figures3/knormG.jpg}\caption{}\label{sub:knormd}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[width=\textwidth]{figures3/knormI.jpg}\caption{}\label{sub:knorme}\end{subfigure}
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[height=120pt]{figures3/knormB.jpg}\caption{}\label{sub:knormf}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[height=120pt]{figures3/knormD.jpg}\caption{}\label{sub:knormg}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[height=120pt]{figures3/knormF.jpg}\caption{}\label{sub:knormh}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[height=120pt]{figures3/knormH.jpg}\caption{}\label{sub:knormi}\end{subfigure}%
\begin{subfigure}[h]{0.2\textwidth}\centering\includegraphics[height=120pt]{figures3/knormJ.jpg}\caption{}\label{sub:knormj}\end{subfigure}
\captext[\caplbl{knorm}LNBNN feature correspondence scoring]{
% ---
This shows the four nearest neighbors of a distinctive query feature~\cref{sub:knormf}.
The bottom row shows the warped and normalized features with their SIFT descriptors overlaid.
The top row shows the annotation from which each feature was extracted.
The first two neighbors~\cref{sub:knormg,sub:knormh} are correct matches, the third neighbor~\cref{sub:knormi} is an incorrect match, and the fourth neighbor~\cref{sub:knormj} is used as the LNBNN normalizer to score the first three matches.
% ---
}
\label{fig:knorm}
\end{figure}
}
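\begin{comment}
NOTE: a minimal numpy sketch of the LNBNN correspondence scoring illustrated in
fig:knorm, assuming the K+1 sorted nearest-neighbor distances have already been
computed (e.g., with a kd-tree). The function name is hypothetical, and the
real pipeline places additional constraints on which neighbor may serve as the
normalizer, which this sketch omits.

import numpy as np

def lnbnn_scores(dists, K=3):
    # dists: (num_query_feats, K + 1) sorted neighbor distances per feature.
    # The (K+1)-th neighbor acts as the normalizer: candidate match j scores
    # d[K] - d[j], i.e., how much closer it is than the normalizer.
    norm_dist = dists[:, K:K + 1]
    return np.maximum(norm_dist - dists[:, :K], 0.0)
\end{comment}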
\begin{comment}
python -m ibeis.algo.hots.chip_match --test-show_single_namematch --qaid 1 \
    --noshow_truth --show_timedelta=False --show_aid=False \
    --dpath ~/latex/crall-thesis-2017 --save figures3/namematch.jpg --diskshow --dpi=300 --figsize=5,5

# python -m ibeis.algo.hots.chip_match --test-show_single_namematch --qaid 2 --dpath ~/latex/crall-candidacy-2015 --save figures3/namematch.jpg --diskshow --dpi=180 --clipwhite
python -m ibeis.algo.hots.chip_match --test-show_single_namematch --qaid 3 --dpath ~/latex/crall-candidacy-2015 --save figures3/namematch.jpg --diskshow --dpi=180 --clipwhite
python -m ibeis.algo.hots.chip_match --test-show_single_namematch --qaid 4 --dpath ~/latex/crall-candidacy-2015 --save figures3/namematch.jpg --diskshow --dpi=180 --clipwhite --verbose
\end{comment}
\newcommand{\namematch}{
\begin{figure}[ht!]
\centering\includegraphics[width=\textwidth]{figures3/namematch.jpg}
\captext[\caplbl{namematch}\Nsumprefix{} \namescoring{}]{
% ---
The query annotation is at the top left.
Each query feature matches at most one feature in the exemplars for a name.
Each line denotes a feature correspondence, colored by its matching score.
In the top right of each database annotation is its annotation score.
Feature scores from multiple views are combined into a name score, shown at the top.
% ---
}
\label{fig:namematch}
\end{figure}
}

\begin{comment}
ibeis sver_single_chipmatch -t default:refine_method=cv2-lmeds-homog,full_homog_checks=True -a default --qaid 18 --dpath ~/latex/crall-candidacy-2015 --save figures3/sverkpts.jpg --label sver --dpi=300 --clipwhite --diskshow --saveparts --figsize=10,10 --norefinelbl
\end{comment}
\newcommand{\sver}{
\begin{figure}[h]
\centering
\begin{subfigure}[h]{0.25\textwidth}\centering\includegraphics[height=130pt]{figures3/sverkptsA.jpg}\caption{}\label{sub:svera}\end{subfigure}
\begin{subfigure}[h]{0.25\textwidth}\centering\includegraphics[height=130pt]{figures3/sverkptsB.jpg}\caption{}\label{sub:sverb}\end{subfigure}
\begin{subfigure}[h]{0.25\textwidth}\centering\includegraphics[height=130pt]{figures3/sverkptsC.jpg}\caption{}\label{sub:sverc}\end{subfigure}
\begin{subfigure}[h]{0.35\textwidth}\centering\includegraphics[width=\textwidth]{figures3/sverkptsD.jpg}\caption{}\label{sub:sverd}\end{subfigure}
\begin{subfigure}[h]{0.35\textwidth}\centering\includegraphics[width=\textwidth]{figures3/sverkptsE.jpg}\caption{}\label{sub:svere}\end{subfigure}
\begin{subfigure}[h]{0.35\textwidth}\centering\includegraphics[width=\textwidth]{figures3/sverkptsF.jpg}\caption{}\label{sub:sverf}\end{subfigure}
\begin{subfigure}[h]{0.35\textwidth}\centering\includegraphics[width=\textwidth]{figures3/sverkptsG.jpg}\caption{}\label{sub:sverg}\end{subfigure}
\captext[Spatial verification]{
% ---
This shows an example of the spatial verification process.
The three images on the top show the original matches~\cref{sub:svera}, the best set of inliers from affine hypothesis generation~\cref{sub:sverb}, and the final set of homography inliers~\cref{sub:sverc}.
The images on the bottom show the matching images warped and superimposed using both the best affine transformation~\cref{sub:sverd,sub:svere} and the estimated homography~\cref{sub:sverf,sub:sverg}.
% ---
}
\label{fig:sver}
\end{figure}
}
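\begin{comment}
NOTE: a rough OpenCV stand-in for the two-stage spatial verification shown in
fig:sver (affine hypotheses, then homography refinement with LMedS, matching
the cv2-lmeds-homog refine_method in the generating command above). The thesis
procedure differs in how affine hypotheses are generated and applies further
consistency checks that this sketch omits; the function name is hypothetical.

import cv2
import numpy as np

def spatially_verify(pts1, pts2):
    # pts1, pts2: (N, 2) float32 arrays of putative correspondences, N >= 4
    # Stage 1: robust affine hypothesis (RANSAC over affine models)
    A, aff_mask = cv2.estimateAffine2D(pts1, pts2, method=cv2.RANSAC,
                                       ransacReprojThreshold=5.0)
    keep = aff_mask.ravel().astype(bool)
    # Stage 2: refine the affine inliers with a homography (LMedS)
    H, hom_mask = cv2.findHomography(pts1[keep], pts2[keep], cv2.LMEDS)
    return A, H, keep, hom_mask.ravel().astype(bool)
\end{comment}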
\begin{comment}
ALL DATABASE INFO
python -m ibeis Chap3.measure dbstats --dbs=PZ_Master1,GZ_Master1,GIRM_Master1,humpbacks_fb
python -m ibeis Chap3.agg_dbstats
\end{comment}
\newcommand{\DatabaseInfo}{
\begin{table}[ht!]
\centering
\captext[Database statistics]{
These statistics indicate the number of individuals and annotations in each dataset.
An encounter is a group of annotations from the same individual taken at the same place and time.
Resighted names contain multiple encounters.}
\label{tbl:DatabaseStatistics}
\input{figures3/agg-enc.tex}
\end{table}
%--
\begin{table}[h!]
\centering
\captext[Annotations per quality]{
The ``None'' column indicates the number of annotations without a quality label.}
\label{tbl:AnnotationsPerQuality}
\input{figures3/agg-qual.tex}
\end{table}
%--
\begin{table}[h!]
\centering
\captext[Annotations per viewpoint]{
% ---
Column names are abbreviated using the first letters of front, left, back, and right.
The ``None'' column indicates the number of annotations without a viewpoint label.
% ---
}
\label{tbl:AnnotationsPerViewpoint}
\input{figures3/agg-view.tex}
\end{table}
}

%-------------
% TimeDeltas
\begin{comment}
python -m ibeis Chap3.measure time_distri --dbs=GIRM_Master1,GZ_Master1,PZ_Master1,humpbacks_fb --diskshow
python -m ibeis Chap3.draw time_distri --dbs=GIRM_Master1,GZ_Master1,PZ_Master1,humpbacks_fb --diskshow
\end{comment}
\newcommand{\timedist}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=.9\textwidth]{figures3/PZ_Master1/timedist.png}\caption{Plains zebras}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=.9\textwidth]{figures3/GZ_Master1/timedist.png}\caption{Grévy's zebras}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=.9\textwidth]{figures3/GIRM_Master1/timedist.png}\caption{Masai giraffes}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=.9\textwidth]{figures3/humpbacks_fb/timedist.png}\caption{Humpbacks}\end{subfigure}
\captext[\caplbl{timedist}Distribution of image timestamps]{
% ---
The y-axis is plotted on a square-root scale to emphasize times when only a few images were taken.
For plains zebras and Grévy's zebras, images were collected over many years.
For Masai giraffes, all data was collected immediately before and during the \GZC{}.
% ---
}
\label{fig:timedist}
\end{figure}
}

% -------------------------------
% --- Baseline Experiments ---
% -------------------------------
\begin{comment}
python -m ibeis Chap3.draw_agg_baseline --diskshow
python -m ibeis Chap3.draw_all --dbs=GZ_Master1,PZ_Master1,GIRM_Master1
python -m ibeis Chap3.draw_all --db GZ_Master1
python -m ibeis Chap3.draw_all --db PZ_Master1
python -m ibeis Chap3.draw_all --db GIRM_Master1
\end{comment}
\newcommand{\BaselineExpt}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/agg-baseline.png}\end{subfigure}
\captext[\caplbl{BaselineExpt}Baseline experiment]{
% ---
The baseline experiment is a high-level indicator of the ranking accuracy for each species.
We measure ranking accuracy using one query annotation and one database annotation per individual, selected from different encounters.
The number of query annotations (\pvar{qsize}) and database annotations (\pvar{dsize}) are given for each species in the legend.
% ---
}
\label{fig:BaselineExpt}
\end{figure}
}
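\begin{comment}
NOTE: the baseline plots (and the later experiment plots, cf. the
draw_rank_cmc command below) are rank-based accuracy curves. This is a minimal
sketch of how such a cumulative match characteristic (CMC) curve is computed
from the rank at which each query's correct name was returned; the function
and variable names are hypothetical.

import numpy as np

def cmc_curve(gt_ranks, max_rank=20):
    # gt_ranks: 1-based rank of the correct name for each query
    gt_ranks = np.asarray(gt_ranks)
    ranks = np.arange(1, max_rank + 1)
    # fraction of queries whose correct match appears at or below each rank
    return np.array([(gt_ranks <= r).mean() for r in ranks])
\end{comment}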
\begin{comment}
python -m ibeis Chap3.draw_all --dbs=GZ_Master1,PZ_Master1
\end{comment}

\begin{comment}
ibeis Chap3.measure smk --db=GZ_Master1
ibeis Chap3.draw smk --db=GZ_Master1 --diskshow

ibeis Chap3.measure smk --db=PZ_Master1
ibeis Chap3.draw smk --db=PZ_Master1 --diskshow

ibeis Chap3.measure smk --dbs=GZ_Master1,PZ_Master1
ibeis Chap3.draw smk --dbs=GZ_Master1,PZ_Master1 --diskshow
\end{comment}
\newcommand{\SMKExpt}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/PZ_Master1/smk.png}\caption{Plains zebras}\label{sub:SMKExptA}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/GZ_Master1/smk.png}\caption{Grévy's zebras}\label{sub:SMKExptB}\end{subfigure}
\captext[\caplbl{SMKExpt}SMK experiment]{
% ---
In this experiment we compare the (VLAD-based) SMK algorithm to our LNBNN ranking algorithm.
The results demonstrate that LNBNN achieves higher ranking accuracy than SMK.
The number of query/database annotations (\pvar{qsize} / \pvar{dsize}) are shown in the lower left.
% ---
}
\label{fig:SMKExpt}
\end{figure}
}

\begin{comment}
python -m ibeis Chap3.measure foregroundness --dbs=GZ_Master1,PZ_Master1
python -m ibeis Chap3.draw foregroundness --dbs=GZ_Master1,PZ_Master1 --diskshow

python -m ibeis -e draw_rank_cmc --db GZ_Master1 -a timectrl -t baseline:fg_on=[True,False] --show
\end{comment}
\newcommand{\FGIntraExpt}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/PZ_Master1/foregroundness_intra.png}\caption{Plains zebras}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/GZ_Master1/foregroundness_intra.png}\caption{Grévy's zebras}\end{subfigure}
\captext[\caplbl{FGIntraExpt}Foregroundness experiment]{
% ---
Applying foregroundness weights to feature correspondences improves the identification accuracy at the top rank by filtering matches in scenery.
This experiment was performed by matching annotations within several occurrences.
Thus, in this experiment \pvar{qsize} is a sum and \pvar{dsize} is an average.
% ---
}
\label{fig:FGIntraExpt}
\end{figure}
}

% -------------------------------
% --- Invariance Experiments ----
% -------------------------------
\newcommand{\InvarExpt}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/PZ_Master1/invar.png}\caption{Plains zebras}\label{sub:InvarExptA}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/GZ_Master1/invar.png}\caption{Grévy's zebras}\label{sub:InvarExptB}\end{subfigure}
\captext[\caplbl{InvarExpt}Feature invariance experiment]{
% ---
This experiment tests the effect of affine invariance (AI) and the query-side rotation heuristic (QRH) on identification accuracy.
For plains zebras, circular keypoints with the QRH are the most accurate.
For Grévy's zebras, enabling affine invariance works best.
The number of query/database annotations (\pvar{qsize} / \pvar{dsize}) are shown in the lower left.
% ---
}
\label{fig:InvarExpt}
\end{figure}
}
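\begin{comment}
NOTE: a toy sketch of the idea behind the query-side rotation heuristic (QRH)
tested in fig:InvarExpt: describe each query keypoint at several hypothesized
orientations so a match can still be found under in-plane rotation. The array
layout, offsets, and function name are assumptions, not the pipeline's actual
representation (cf. the --augment_orientation flag in the commands below).

import numpy as np

def augment_query_orientations(kpts, deltas=(-np.pi / 8, 0.0, np.pi / 8)):
    # kpts: (N, 4) array of (x, y, scale, orientation) keypoints
    out = []
    for delta in deltas:
        aug = kpts.copy()
        aug[:, 3] = (aug[:, 3] + delta) % (2 * np.pi)  # rotate orientation
        out.append(aug)
    # each query feature now competes in matching at every orientation
    return np.vstack(out)
\end{comment}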
% TODO: http://tex.stackexchange.com/questions/75014/is-it-possible-to-make-a-reference-to-a-subfigure-to-appear-figure-2a-with-cle
\begin{comment}
python -m ibeis.viz.viz_chip --test-show_chip --aid 44 \
    --weight_label=None --ecc --dpi=300 --draw_lbls=False \
    --ellalpha=.8 --ell_linewidth=1.4 --notitle \
    --dpath ~/latex/crall-thesis-2017/figures3 --save=pzaffkpts.jpg --diskshow --darken

python -m ibeis.viz.viz_chip --test-show_chip --aid 44 \
    --weight_label=None --ecc --dpi=300 --draw_lbls=False \
    --ellalpha=.8 --ell_linewidth=1.4 --notitle \
    --affine-invariance=False --augment_orientation=True --ori \
    --dpath ~/latex/crall-thesis-2017/figures3 --save=pzcirckpts.jpg --diskshow --darken

python -m ibeis.viz.viz_chip --test-show_chip --db GZ_Master1 --aid 1000 \
    --weight_label=None --ecc --dpi=300 --draw_lbls=False \
    --ellalpha=.8 --ell_linewidth=1.4 --notitle \
    --dpath ~/latex/crall-thesis-2017/figures3 --save=gzaffkpts.jpg --diskshow --darken

python -m ibeis.viz.viz_chip --test-show_chip --db GZ_Master1 --aid 1000 \
    --weight_label=None --ecc --dpi=300 --draw_lbls=False \
    --ellalpha=.8 --ell_linewidth=1.4 --notitle \
    --affine-invariance=False --augment_orientation=True --ori \
    --dpath ~/latex/crall-thesis-2017/figures3 --save=gzcirckpts.jpg --diskshow --darken
\end{comment}
\newcommand{\kptstype}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{.48\textwidth}\centering\includegraphics[width=\textwidth]{figures3/pzaffkpts.jpg}\caption{}\label{sub:kptstypeA}\end{subfigure}
\begin{subfigure}[h]{.48\textwidth}\centering\includegraphics[width=\textwidth]{figures3/pzcirckpts.jpg}\caption{}\label{sub:kptstypeB}\end{subfigure}
\begin{subfigure}[h]{.48\textwidth}\centering\includegraphics[width=\textwidth]{figures3/gzaffkpts.jpg}\caption{}\label{sub:kptstypeC}\end{subfigure}
\begin{subfigure}[h]{.48\textwidth}\centering\includegraphics[width=\textwidth]{figures3/gzcirckpts.jpg}\caption{}\label{sub:kptstypeD}\end{subfigure}
\captext[\caplbl{kptstype}Examples of keypoint invariance]{
% ---
Affine keypoints detected on plains zebras tend to encompass only one or two stripes.
The distinctive stripe patterns on Grévy's zebras are well captured by affine keypoints, whereas circular keypoints are more spread out.
For visibility, this figure shows a random sample of all keypoints on a darkened image.
Elliptical keypoints in~\cref{sub:kptstypeA,sub:kptstypeC} are colored by eccentricity and circular keypoints in~\cref{sub:kptstypeB,sub:kptstypeD} are colored by scale.
% ---
}
\label{fig:kptstype}
\end{figure}
}

% -------------------------------
% --- Namescore Experiments ----
% -------------------------------
\begin{comment}
python -m ibeis Chap3.measure nsum --dbs=GZ_Master1
python -m ibeis Chap3.measure nsum --dbs=GZ_Master1,PZ_Master1
python -m ibeis Chap3.draw nsum --dbs=GZ_Master1,PZ_Master1 --diskshow
\end{comment}
\newcommand{\NScoreExpt}{
\begin{figure}[ht!]\centering
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/PZ_Master1/nsum.png}\caption{Plains zebras}\label{sub:NScoreExptA}\end{subfigure}
\begin{subfigure}[h]{\textwidth}\centering\includegraphics[width=\textwidth]{figures3/GZ_Master1/nsum.png}\caption{Grévy's zebras}\label{sub:NScoreExptB}\end{subfigure}
\captext[\caplbl{NScoreExpt}Name scoring experiment]{
% ---
There is a clear separation in identification accuracy between names with $1$ exemplar and names with $3$ exemplars.
Feature-based name scoring (\nsum{}) is slightly more accurate than annotation-based name scoring (\csum{}).
The number of query/database annotations (\pvar{qsize} / \pvar{dsize}) are shown in the lower left.
Database size was normalized using confusors.
% ---
}
\label{fig:NScoreExpt}
\end{figure}
}
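\begin{comment}
NOTE: a minimal sketch contrasting the two name-scoring rules compared in
fig:NScoreExpt (see also fig:namematch). The data layout and names are
hypothetical, and the csum branch reflects one plausible reading of
annotation-based scoring: nsum keeps each query feature's single best
correspondence per name and sums those; csum sums scores per annotation and
lets each name take its best annotation's score.

def name_scores(corres, method='nsum'):
    # corres: iterable of (qfx, name, aid, score) feature correspondences
    best_per_feat, annot_sum = {}, {}
    for qfx, name, aid, score in corres:
        # best correspondence for this query feature against this name
        key = (name, qfx)
        best_per_feat[key] = max(best_per_feat.get(key, 0.0), score)
        # running per-annotation total
        annot_sum[(name, aid)] = annot_sum.get((name, aid), 0.0) + score
    scores = {}
    if method == 'nsum':
        for (name, _), s in best_per_feat.items():
            scores[name] = scores.get(name, 0.0) + s
    else:  # csum: each name takes its best annotation's summed score
        for (name, _), s in annot_sum.items():
            scores[name] = max(scores.get(name, 0.0), s)
    return scores
\end{comment}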
% -------------------------------
% --- K Experiments
% -------------------------------
\begin{comment}
python -m ibeis Chap3.measure kexpt --dbs=GZ_Master1,PZ_Master1
python -m ibeis Chap3.draw kexpt --dbs=GZ_Master1,PZ_Master1 --diskshow
\end{comment}
\newcommand{\KExptA}{
\begin{figure}[ht!]\centering
\centering\includegraphics[width=\textwidth]{figures3/PZ_Master1/kexpt.png}
\captext[\caplbl{KExptA}The $K$ experiment for plains zebras]{
% ---
This shows the identification accuracy for plains zebras using different values of $\K$ (the number of nearest neighbors assigned to each query feature), different numbers of exemplars (\pvar{dpername}), and different database sizes (\pvar{dsize}).
% ---
}
\label{fig:KExptA}
\end{figure}
}
\newcommand{\KExptB}{
\begin{figure}[ht!]\centering
\centering\includegraphics[width=\textwidth]{figures3/GZ_Master1/kexpt.png}
\captext[\caplbl{KExptB}The $K$ experiment for Grévy's zebras]{
% ---
This shows the identification accuracy for Grévy's zebras using different values of $\K$ (the number of nearest neighbors assigned to each query feature), different numbers of exemplars (\pvar{dpername}), and different database sizes (\pvar{dsize}).
% ---
}
\label{fig:KExptB}
\end{figure}
}

% -------------------------------
% --- Failure Cases
% -------------------------------

% --- VIEWPOINT
\begin{comment}
python -m ibeis draw_match_cases --db PZ_Master1 -a timectrl -t best --filt :fail=True,sortdsc=gtscore --show --qaid=1223 \
    --annotmodes=[3] --noshow_truth --dpi=300 --dpath ~/latex/crall-thesis-2017 --save figuresC/failview.jpg --diskshow --saveparts
\end{comment}
\newcommand{\FailViewpoint}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=250pt]{figuresC/failviewA.jpg}\caption{}\label{sub:failviewA}\end{subfigure}
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=250pt]{figuresC/failviewB.jpg}\caption{}\label{sub:failviewB}\end{subfigure}
\captext[Unaligned failure case]{
% ---
Due to pose and viewpoint variations, the correctly matching pair of annotations \cref{sub:failviewB} is returned at rank $22$, while the incorrect pair of annotations \cref{sub:failviewA} is returned at rank $1$.
In the correct pair, the features on the front leg are not aligned and fail to match.
%In the incorrect pair, the heads of the animals are in a similar pose and thus creating several correspondences
% that are distinctive by coincidence.
% ---
}
\label{fig:FailViewpoint}
\end{figure}
}

% --- OCCLUSION
\begin{comment}
python -m ibeis draw_match_cases -a timectrl -t best --filt :fail=True,sortdsc=gfscore --db PZ_Master1 --show --qaid=17190 \
    --annotmodes=[3] --noshow_truth --dpi=300 --dpath ~/latex/crall-thesis-2017 --save figuresC/failocclu.jpg --diskshow --saveparts
\end{comment}
\newcommand{\FailOcclusion}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=260pt]{figuresC/failoccluA.jpg}\caption{}\label{sub:failoccluA}\end{subfigure}
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=260pt]{figuresC/failoccluB.jpg}\caption{}\label{sub:failoccluB}\end{subfigure}
\captext[Occlusion failure case]{
% ---
The plants in the query annotation inhibit the creation of feature correspondences, causing the correct pair of annotations \cref{sub:failoccluB} to be returned at rank $2$.
%This is exacerbated by pose and viewpoint variations.
The incorrect pair of annotations \cref{sub:failoccluA} at rank $1$ does not show the same individual, even though the two annotations share similar features.
% ---
}
\label{fig:FailOcclusion}
\end{figure}
}

% --- QUALITY
\begin{comment}
python -m ibeis draw_match_cases -a timectrl -t best --filt :fail=True,sortdsc=gfscore --db GIRM_Master1 --qaid=336 --show \
    --annotmodes=[3] --noshow_truth --dpi=300 --dpath ~/latex/crall-thesis-2017 --save figuresC/failquality.jpg --diskshow --saveparts

python -m ibeis draw_match_cases -a timectrl -t best --filt :fail=True,with_tag=Quality,sortdsc=gfscore --db GIRM_Master1 --qaid 639 \
    --hargv=match --render --cmdaug="FailQuality" --vert=False
\end{comment}
\newcommand{\FailQuality}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=270pt]{figuresC/failqualityA.jpg}\caption{}\label{sub:failqualityA}\end{subfigure}
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=270pt]{figuresC/failqualityB.jpg}\caption{}\label{sub:failqualityB}\end{subfigure}
\captext[\caplbl{FailQuality}Quality failure case]{
% ---
The low resolution of the database annotation causes the correct pair of annotations \cref{sub:failqualityB} to be returned at rank $43$.
The incorrect pair of annotations \cref{sub:failqualityA} did not receive a particularly high score, but it was returned at rank $1$ because no feature correspondences were established to the correct match.
% ---
}
\label{fig:FailQuality}
\end{figure}
}

% --- Scenery Match
\begin{comment}
python -m ibeis draw_match_cases -a timectrl -t best:fg_on=False --filt :fail=True,sortdsc=gfscore --db GZ_Master1 --show --qaid=2317 \
    --annotmodes=[3] --noshow_truth --dpi=300 --dpath ~/latex/crall-thesis-2017 --save figuresC/failscenery.jpg --diskshow --saveparts
\end{comment}
\newcommand{\FailScenery}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=270pt]{figuresC/failsceneryA.jpg}\caption{}\label{sub:failsceneryA}\end{subfigure}
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=270pt]{figuresC/failsceneryB.jpg}\caption{}\label{sub:failsceneryB}\end{subfigure}
\captext[\caplbl{FailScenery}Scenery failure case]{
% ---
The incorrect pair of annotations \cref{sub:failsceneryA} was returned at rank $1$ because of strong matches in the background scenery.
The correct pair \cref{sub:failsceneryB} was returned at rank $2$ and did not produce matches on the front leg due to pose variations.
The annotations in the scenery match pair were taken seconds apart in the same location, causing their backgrounds to be near duplicates.
The foregroundness measure was disabled to produce this example; enabling it addresses nearly all scenery match cases.
% ---
}
\label{fig:FailScenery}
\end{figure}
}

% --- Photobomb
\begin{comment}
python -m ibeis draw_match_cases --db PZ_Master1 -a timectrl -t best --filt :fail=True,with_tag=Photobomb,sortdsc=gfscore --qaid=6944 \
    --annotmodes=[3] --noshow_truth --dpi=300 --dpath ~/latex/crall-thesis-2017 --save figuresC/failpb.jpg --diskshow --saveparts
\end{comment}
\newcommand{\FailPhotobomb}{
\begin{figure}[ht!]
\centering
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=300pt]{figuresC/failpbA.jpg}\caption{}\label{sub:failpbA}\end{subfigure}
\begin{subfigure}[h]{0.47\textwidth}\centering\includegraphics[height=300pt]{figuresC/failpbB.jpg}\caption{}\label{sub:failpbB}\end{subfigure}
\captext[\caplbl{FailPhotobomb}Photobomb failure case]{
% ---
A photobombing animal in the foreground of the database annotation causes LNBNN to return the incorrect result \cref{sub:failpbA} at rank $1$.
The correct match \cref{sub:failpbB} has a significant number of matches, but there is a difference of $1$ day between the pair.
On the other hand, the annotations in the photobomb pair were taken within seconds of each other and therefore have much higher visual similarity.
% ---
}
\label{fig:FailPhotobomb}
\end{figure}
}