ld2daps/Notebooks/Classification Scores.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generic Classification Scores for DFC 2018"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from sklearn import metrics\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
"# Triskele\n",
"import sys\n",
"from pathlib import Path\n",
"triskele_path = Path('../triskele/python')\n",
"sys.path.append(str(triskele_path.resolve()))\n",
"import triskele\n",
"\n",
"figsize = np.array((16, 9))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Classes Metadata"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_dfc_lbl = pd.read_csv('../labels.csv')\n",
"df_meta_idx = pd.read_csv('../metaclass_indexes.csv')\n",
"df_meta_lbl = pd.read_csv('../metaclass_labels.csv')\n",
"\n",
"df_dfc_lbl.merge(df_meta_idx).merge(df_meta_lbl)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"meta_idx = np.array(df_meta_idx['metaclass_index'], dtype=np.uint8)"
]
},
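{
"cell_type": "markdown",
"metadata": {},
"source": [
"`meta_idx` acts as a lookup table: entry `i` holds the metaclass of class `i`, so NumPy fancy indexing (`meta_idx[gt]`, used below) remaps a whole label image in one vectorized step. A minimal sketch with synthetic values (not the actual DFC table):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Synthetic example only: class i maps to metaclass demo_lut[i].\n",
"demo_lut = np.array([0, 1, 1, 2], dtype=np.uint8)\n",
"demo_lbl = np.array([[0, 1],\n",
"                     [2, 3]], dtype=np.uint8)\n",
"demo_lut[demo_lbl]  # -> array([[0, 1], [1, 2]])"
]
},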
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Ground Truth and Prediction"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gt = triskele.read('../Data/ground_truth/2018_IEEE_GRSS_DFC_GT_TR.tif')\n",
"pred = triskele.read('../Res/tmppred.tif')"
]
},
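{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (assumption: both rasters were produced on the same grid, with 0 marking unlabelled pixels); the per-pixel comparison below only makes sense if the shapes match."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Both rasters must share the same grid for per-pixel scoring.\n",
"print('gt  :', gt.shape, gt.dtype)\n",
"print('pred:', pred.shape, pred.dtype)\n",
"assert gt.shape == pred.shape"
]
},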
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display Classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, (ax_gt, ax_pred) = plt.subplots(2, figsize=figsize * 2)\n",
"ax_gt.imshow(gt)\n",
"ax_gt.set_title('Ground Truth')\n",
"ax_pred.imshow(pred)\n",
"ax_pred.set_title('Prediction')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display Meta Classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, (ax_gt, ax_pred) = plt.subplots(2, figsize=figsize * 2)\n",
"ax_gt.imshow(meta_idx[gt])\n",
"ax_gt.set_title('Ground Truth')\n",
"ax_pred.imshow(meta_idx[pred])\n",
"ax_pred.set_title('Prediction')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Metrics\n",
"\n",
"### Classes\n",
"\n",
"#### Confusion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f = np.nonzero(pred)\n",
"pred_s = pred[f].flatten()\n",
"gt_s = gt[f].flatten()\n",
"\n",
"ct = pd.crosstab(gt_s, pred_s,\n",
" rownames=['Prediction'], colnames=['Reference'],\n",
" margins=True, margins_name='Total',\n",
" normalize=False # all, index, columns\n",
" )\n",
"ct"
]
},
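{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `normalize` argument left at `False` above also accepts `'all'`, `'index'` and `'columns'`; with `normalize='index'` each row sums to 1 and the diagonal reads as per-class recall. A sketch (margins are dropped here, since combining `margins=True` with `normalize` behaves differently across pandas versions):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Row-normalized confusion matrix: diagonal entries are per-class recall.\n",
"pd.crosstab(gt_s, pred_s,\n",
"            rownames=['Reference'], colnames=['Prediction'],\n",
"            normalize='index').round(3)"
]
},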
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Scores\n",
"\n",
"##### Accuracy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.accuracy_score(gt_s, pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Kappa"
]
},
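{
"cell_type": "markdown",
"metadata": {},
"source": [
"Cohen's kappa corrects the observed agreement for the agreement expected by chance from the marginal class frequencies:\n",
"\n",
"$$\\kappa = \\frac{p_o - p_e}{1 - p_e}$$\n",
"\n",
"where $p_o$ is the observed accuracy and $p_e$ the chance accuracy: $\\kappa = 1$ means perfect agreement, $\\kappa = 0$ no better than chance."
]
},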
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.cohen_kappa_score(gt_s, pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Precision, Recall, f1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.precision_recall_fscore_support(gt_s, pred_s)\n",
"print(metrics.classification_report(gt_s, pred_s))"
]
},
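{
"cell_type": "markdown",
"metadata": {},
"source": [
"`classification_report` prints per-class scores; when a single summary number is needed, sklearn aggregates them through the `average` parameter. A sketch of the two common choices:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Macro-F1 weights every class equally; weighted-F1 weights by class support.\n",
"print('macro F1   :', metrics.f1_score(gt_s, pred_s, average='macro'))\n",
"print('weighted F1:', metrics.f1_score(gt_s, pred_s, average='weighted'))"
]
},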
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Meta Classes\n",
"\n",
"#### Confusion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f = np.nonzero(pred)\n",
"m_pred_s = meta_idx[pred_s]\n",
"m_gt_s = meta_idx[gt_s]\n",
"\n",
"ct = pd.crosstab(m_gt_s, m_pred_s,\n",
" rownames=['Prediction'], colnames=['Reference'],\n",
" margins=True, margins_name='Total',\n",
" normalize=False # all, index, columns\n",
" )\n",
"ct"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Scores\n",
"\n",
"##### Accuracy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.accuracy_score(m_gt_s, m_pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Kappa"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.cohen_kappa_score(m_gt_s, m_pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Precision, Recall, f1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.precision_recall_fscore_support(m_gt_s, m_pred_s)\n",
"print(metrics.classification_report(m_gt_s, m_pred_s))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}