{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Generic Classification Scores for DFC 2018 [TESTING]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"from sklearn import metrics\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"import triskele\n",
"\n",
"figsize = np.array((16, 9))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Classes Metadata"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_dfc_lbl = pd.read_csv('../../minigrida/Data/ground_truth/labels.csv')\n",
"df_meta_idx = pd.read_csv('../../minigrida/Data/ground_truth/jurse_meta_idx.csv')\n",
"df_meta_lbl = pd.read_csv('../../minigrida/Data/ground_truth/jurse_meta_lbl.csv')\n",
"\n",
"df_dfc_lbl.merge(df_meta_idx).merge(df_meta_lbl)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"meta_idx = np.array(df_meta_idx['metaclass_index'], dtype=np.uint8)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load Ground Truth and Prediction"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gt = triskele.read('../Data/ground_truth/2018_IEEE_GRSS_DFC_GT_TR.tif')\n",
"pred = triskele.read('../../minigrida/Enrichment/Results/tellus_fourhth_95bfcf.tif')"
]
},
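{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (a minimal sketch, assuming both rasters are single-band label images on the same grid): the shapes must match for the pixel-wise comparisons below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Shapes must match for the pixel-wise comparison below\n",
"print('gt:  ', gt.shape, gt.dtype)\n",
"print('pred:', pred.shape, pred.dtype)\n",
"assert gt.shape == pred.shape"
]
},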
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display Classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, (ax_gt, ax_pred) = plt.subplots(2, figsize=figsize * 2)\n",
"ax_gt.imshow(gt)\n",
"ax_gt.set_title('Ground Truth')\n",
"ax_pred.imshow(pred)\n",
"ax_pred.set_title('Prediction')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Display Meta Classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, (ax_gt, ax_pred) = plt.subplots(2, figsize=figsize * 2)\n",
"ax_gt.imshow(meta_idx[gt])\n",
"ax_gt.set_title('Ground Truth')\n",
"ax_pred.imshow(pred)\n",
"ax_pred.set_title('Prediction')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Metrics\n",
"\n",
"### Classes\n",
"\n",
"#### Confusion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f = np.nonzero(pred)\n",
"pred_s = pred[f].flatten()\n",
"gt_s = gt[f].flatten()\n",
"\n",
"ct = pd.crosstab(gt_s, pred_s,\n",
" rownames=['Prediction'], colnames=['Reference'],\n",
" margins=True, margins_name='Total',\n",
" normalize=False # all, index, columns\n",
" )\n",
"ct"
]
},
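{
"cell_type": "markdown",
"metadata": {},
"source": [
"Per-class agreement is easier to read on a row-normalized matrix: each row sums to 1 and the diagonal gives each reference class's recall (producer's accuracy). A minimal sketch reusing the same `pd.crosstab` call:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Row-normalized confusion: diagonal = per-class recall\n",
"pd.crosstab(gt_s, pred_s,\n",
"            rownames=['Reference'], colnames=['Prediction'],\n",
"            normalize='index').round(3)"
]
},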
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Scores\n",
"\n",
"##### Accuracy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.accuracy_score(gt_s, pred_s)"
]
},
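{
"cell_type": "markdown",
"metadata": {},
"source": [
"Overall accuracy is simply the fraction of matching pixels, $OA = \\frac{1}{N}\\sum_{i=1}^{N} \\mathbf{1}[y_i = \\hat{y}_i]$. A one-line manual check on the filtered arrays:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Manual check: fraction of pixels where prediction equals reference\n",
"(gt_s == pred_s).mean()"
]
},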
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Kappa"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.cohen_kappa_score(gt_s, pred_s)"
]
},
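{
"cell_type": "markdown",
"metadata": {},
"source": [
"Cohen's kappa corrects accuracy for chance agreement: $\\kappa = \\frac{p_o - p_e}{1 - p_e}$, where $p_o$ is the observed agreement and $p_e$ the agreement expected from the confusion-matrix marginals. A minimal sketch reproducing the sklearn value:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Manual Cohen's kappa from the confusion matrix\n",
"cm = metrics.confusion_matrix(gt_s, pred_s)\n",
"n = cm.sum()\n",
"p_o = np.trace(cm) / n                      # observed agreement\n",
"p_e = (cm.sum(0) * cm.sum(1)).sum() / n**2  # chance agreement from marginals\n",
"(p_o - p_e) / (1 - p_e)"
]
},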
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Precision, Recall, f1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.precision_recall_fscore_support(gt_s, pred_s)\n",
"print(metrics.classification_report(gt_s, pred_s))"
]
},
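{
"cell_type": "markdown",
"metadata": {},
"source": [
"The report above is per class; for a single summary figure sklearn can average the scores (a sketch; `'weighted'` weights each class by its support, `'macro'` averages all classes equally):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Support-weighted precision, recall and F1 (support is None when averaging)\n",
"metrics.precision_recall_fscore_support(gt_s, pred_s, average='weighted')"
]
},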
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Meta Classes\n",
"\n",
"#### Confusion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f = np.nonzero(pred)\n",
"m_pred_s = pred_s\n",
"m_gt_s = meta_idx[gt_s]\n",
"\n",
"ct = pd.crosstab(m_gt_s, m_pred_s,\n",
" rownames=['Prediction'], colnames=['Reference'],\n",
" margins=True, margins_name='Total',\n",
" normalize=False # all, index, columns, False\n",
" )\n",
"ct"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lbl = df_meta_lbl['metaclass_label'][ct.columns[:-1]].tolist()\n",
"lbl.append('Total')\n",
"ct.columns = lbl\n",
"ct.columns.name = 'Reference'\n",
"ct.index = lbl\n",
"ct.index.name = 'Reference'\n",
"ct"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Scores\n",
"\n",
"##### Accuracy"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.accuracy_score(m_gt_s, m_pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Kappa"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.cohen_kappa_score(m_gt_s, m_pred_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Precision, Recall, f1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics.precision_recall_fscore_support(m_gt_s, m_pred_s)\n",
"print(metrics.classification_report(m_gt_s, m_pred_s))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}