fastbook/clean/07_sizing_and_tta.ipynb

655 lines
428 KiB
Plaintext
Raw Normal View History

2020-03-06 18:19:03 +00:00
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"# Project-local book helpers; the `#hide` directive excludes this cell from the rendered book\n",
"from utils import *"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Training a state-of-the-art model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Imagenette"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from fastai2.vision.all import *\n",
"# Download and extract the Imagenette dataset; `path` is the local extraction directory\n",
"path = untar_data(URLs.IMAGENETTE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Presizing: resize to a large 460px image per item on CPU, then do augmentation\n",
"# and the final crop to 224px per batch on GPU (min_scale=0.75 limits how small crops get)\n",
"dblock = DataBlock(blocks=(ImageBlock(), CategoryBlock()),\n",
" get_items=get_image_files,\n",
" get_y=parent_label,\n",
" item_tfms=Resize(460),\n",
" batch_tfms=aug_transforms(size=224, min_scale=0.75))\n",
"dls = dblock.dataloaders(path, bs=64)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: left;\">\n",
" <th>epoch</th>\n",
" <th>train_loss</th>\n",
" <th>valid_loss</th>\n",
" <th>accuracy</th>\n",
" <th>time</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <td>0</td>\n",
" <td>1.583403</td>\n",
" <td>2.064317</td>\n",
" <td>0.401792</td>\n",
" <td>01:03</td>\n",
" </tr>\n",
" <tr>\n",
" <td>1</td>\n",
" <td>1.208877</td>\n",
" <td>1.260106</td>\n",
" <td>0.601568</td>\n",
" <td>01:02</td>\n",
" </tr>\n",
" <tr>\n",
" <td>2</td>\n",
" <td>0.925265</td>\n",
" <td>1.036154</td>\n",
" <td>0.664302</td>\n",
" <td>01:03</td>\n",
" </tr>\n",
" <tr>\n",
" <td>3</td>\n",
" <td>0.730190</td>\n",
" <td>0.700906</td>\n",
" <td>0.777819</td>\n",
" <td>01:03</td>\n",
" </tr>\n",
" <tr>\n",
" <td>4</td>\n",
" <td>0.585707</td>\n",
" <td>0.541810</td>\n",
" <td>0.825243</td>\n",
" <td>01:03</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Baseline: train an xresnet50 (no pretrained weights requested) for 5 one-cycle epochs\n",
"# on unnormalized data; compare against the normalized run later in this chapter\n",
"model = xresnet50()\n",
"learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)\n",
"learn.fit_one_cycle(5, 3e-3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Normalization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(TensorImage([0.4842, 0.4711, 0.4511], device='cuda:5'),\n",
" TensorImage([0.2873, 0.2893, 0.3110], device='cuda:5'))"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Per-channel statistics of one batch: reducing over dims [0,2,3] (batch, height, width)\n",
"# keeps the channel axis. Output shows means ~0.47 and stds ~0.29 — data is not normalized.\n",
"x,y = dls.one_batch()\n",
"x.mean(dim=[0,2,3]),x.std(dim=[0,2,3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_dls(bs, size):\n",
" \"Imagenette `DataLoaders` with batch size `bs`, final image size `size`, and ImageNet normalization\"\n",
" # Same presizing pipeline as before, plus Normalize with ImageNet channel stats;\n",
" # parameterizing `size` is what enables progressive resizing below\n",
" dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),\n",
" get_items=get_image_files,\n",
" get_y=parent_label,\n",
" item_tfms=Resize(460),\n",
" batch_tfms=[*aug_transforms(size=size, min_scale=0.75),\n",
" Normalize.from_stats(*imagenet_stats)])\n",
" return dblock.dataloaders(path, bs=bs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Rebuild the dataloaders with normalization applied (bs=64, 224px images)\n",
"dls = get_dls(64, 224)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(TensorImage([-0.0787, 0.0525, 0.2136], device='cuda:5'),\n",
" TensorImage([1.2330, 1.2112, 1.3031], device='cuda:5'))"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Re-check per-channel stats after Normalize: output shows means near 0 and stds near 1\n",
"x,y = dls.one_batch()\n",
"x.mean(dim=[0,2,3]),x.std(dim=[0,2,3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: left;\">\n",
" <th>epoch</th>\n",
" <th>train_loss</th>\n",
" <th>valid_loss</th>\n",
" <th>accuracy</th>\n",
" <th>time</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <td>0</td>\n",
" <td>1.632865</td>\n",
" <td>2.250024</td>\n",
" <td>0.391337</td>\n",
" <td>01:02</td>\n",
" </tr>\n",
" <tr>\n",
" <td>1</td>\n",
" <td>1.294041</td>\n",
" <td>1.579932</td>\n",
" <td>0.517177</td>\n",
" <td>01:02</td>\n",
" </tr>\n",
" <tr>\n",
" <td>2</td>\n",
" <td>0.960535</td>\n",
" <td>1.069164</td>\n",
" <td>0.657207</td>\n",
" <td>01:04</td>\n",
" </tr>\n",
" <tr>\n",
" <td>3</td>\n",
" <td>0.730220</td>\n",
" <td>0.767433</td>\n",
" <td>0.771845</td>\n",
" <td>01:05</td>\n",
" </tr>\n",
" <tr>\n",
" <td>4</td>\n",
" <td>0.577889</td>\n",
" <td>0.550673</td>\n",
" <td>0.824496</td>\n",
" <td>01:06</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Retrain the same architecture with the same schedule on normalized data,\n",
"# so results are directly comparable with the unnormalized baseline above\n",
"model = xresnet50()\n",
"learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)\n",
"learn.fit_one_cycle(5, 3e-3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Progressive resizing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: left;\">\n",
" <th>epoch</th>\n",
" <th>train_loss</th>\n",
" <th>valid_loss</th>\n",
" <th>accuracy</th>\n",
" <th>time</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <td>0</td>\n",
" <td>1.902943</td>\n",
" <td>2.447006</td>\n",
" <td>0.401419</td>\n",
" <td>00:30</td>\n",
" </tr>\n",
" <tr>\n",
" <td>1</td>\n",
" <td>1.315203</td>\n",
" <td>1.572992</td>\n",
" <td>0.525765</td>\n",
" <td>00:30</td>\n",
" </tr>\n",
" <tr>\n",
" <td>2</td>\n",
" <td>1.001199</td>\n",
" <td>0.767886</td>\n",
" <td>0.759149</td>\n",
" <td>00:30</td>\n",
" </tr>\n",
" <tr>\n",
" <td>3</td>\n",
" <td>0.765864</td>\n",
" <td>0.665562</td>\n",
" <td>0.797984</td>\n",
" <td>00:30</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Progressive resizing, step 1: train briefly on small 128px images (bigger bs=128 fits\n",
"# in memory at this size) — note the ~2x faster epochs in the output table\n",
"dls = get_dls(128, 128)\n",
"learn = Learner(dls, xresnet50(), loss_func=CrossEntropyLossFlat(), \n",
" metrics=accuracy)\n",
"learn.fit_one_cycle(4, 3e-3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: left;\">\n",
" <th>epoch</th>\n",
" <th>train_loss</th>\n",
" <th>valid_loss</th>\n",
" <th>accuracy</th>\n",
" <th>time</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <td>0</td>\n",
" <td>0.985213</td>\n",
" <td>1.654063</td>\n",
" <td>0.565721</td>\n",
" <td>01:06</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: left;\">\n",
" <th>epoch</th>\n",
" <th>train_loss</th>\n",
" <th>valid_loss</th>\n",
" <th>accuracy</th>\n",
" <th>time</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <td>0</td>\n",
" <td>0.706869</td>\n",
" <td>0.689622</td>\n",
" <td>0.784541</td>\n",
" <td>01:07</td>\n",
" </tr>\n",
" <tr>\n",
" <td>1</td>\n",
" <td>0.739217</td>\n",
" <td>0.928541</td>\n",
" <td>0.712472</td>\n",
" <td>01:07</td>\n",
" </tr>\n",
" <tr>\n",
" <td>2</td>\n",
" <td>0.629462</td>\n",
" <td>0.788906</td>\n",
" <td>0.764003</td>\n",
" <td>01:07</td>\n",
" </tr>\n",
" <tr>\n",
" <td>3</td>\n",
" <td>0.491912</td>\n",
" <td>0.502622</td>\n",
" <td>0.836445</td>\n",
" <td>01:06</td>\n",
" </tr>\n",
" <tr>\n",
" <td>4</td>\n",
" <td>0.414880</td>\n",
" <td>0.431332</td>\n",
" <td>0.863331</td>\n",
" <td>01:06</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Progressive resizing, step 2: swap in 224px dataloaders on the SAME model and continue\n",
"# training; fine_tune produces the one-epoch then five-epoch tables shown in the output\n",
"learn.dls = get_dls(64, 224)\n",
"learn.fine_tune(5, 1e-3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test time augmentation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"0.8737863898277283"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Test-time augmentation: tta() averages predictions over augmented versions of each\n",
"# validation item; accuracy improves over the plain validation number above\n",
"preds,targs = learn.tta()\n",
"accuracy(preds, targs).item()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Mixup"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Sidebar: Papers and math"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### End sidebar"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": true
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAqwAAADTCAYAAABEKUENAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOy9abRmWV3m+dvDmd75zjfujXnIyHkgMwFJTEClsEAUoZd24bBUlLbsRUt1UQ5VKikWZdlSa1mW3dVdit0Cir2cUFtsBZGZTIYkSTIiMmOebtz53nc+0x76w3kjkvpgfuiF3fnhPnetuG/E2e/Z++xz9tnPfv7Pf4fw3rOHPexhD3vYwx72sIc9vFgh//9uwB72sIc97GEPe9jDHvbwQtgjrHvYwx72sIc97GEPe3hRY4+w7mEPe9jDHvawhz3s4UWNPcK6hz3sYQ972MMe9rCHFzX2COse9rCHPexhD3vYwx5e1NgjrHvYwx72sIc97GEPe3hRQ7/QwXvuOOCJX4ZpN1isbyPaC4yCl6ObNYbnPkNnYYr8+jXGKubo0XtZUzGBSbFyijDpUIzPkSQNZG+M8SGB2qUYlowsTDWmKNnk+nNXGY3O8YpXvJ6vfuFp5u97kEYwRS/fYL61zKYs8EbxcGeFC/4Y9sZFClcjjoaUtQOkW5sEoaV35QIzd9+JWy9pDE6xdvhlNHUdZ0pyp3FxQnnl6+ij9/Oag32evQIbJkTnKWWtgzElloJGEZB2L6Ln7kP7EUUQ4fMUHUY4U+K1R+KQBJQFEBi0cYwC6F14hqUDx2mIaQa9pwhEDPEsKxs3OLx0GyXXycR+gsBSZgNq6Q59NUcS5siwQ4LAlwN0fRq7dZbVi2dYuPfVFGVJWFpsUTLYuEZZmyPzGcV4jZaqk117CnnoJOF4yHj3Erp5gPLK5/jEJ/+a9/7a7/OlCz1UkDPsHKS+8iyi6fF5REdozNRxLpz/ODNHX0sju8T2wNI88ghSdNk980Uac0vMnnw552+s0IkkWtUJyCjSPlE0RdOss1EkpDeepnPstYy2v8b5T32I++5bIFi8myc+8WXmFhpM3/69yN5ldnqeWr6GPnocyRq9Z3Y5+UPv4unffQd3n3yQ7XzAiVlJ6GfoHE74i7/+LPtvfxPtpmL94rOo3TPc/fqX8nv/ywf5o9/5U/7tX/w99nqX26dSLi28gkBpsq1rvPbBWU49/gydpTZf/NRnac8d4aG7Wjx91jO9tMnPvP1H+ZnH/k86Bx7mxsUnSOYKRjvLJPYacRwj/Q1Wr64StKaxecFg0OPgwYN8/vOfF/8fjc3/V7Dg+2PDL//Ke0gSwTjNkb0tuoMu3avLnD5zDiHP0WhltOsxeIuQBYEMCJ3HUhJGnk4nwtuISBgCKSiVR0lJXIu48OyQdGgRNsRLRSgU3gJCIqRDymodLDx4AUIINAIhBOCQEgQghQYkMnBIoejZEaubW+AAUZUPhMQIjRQC40qEEHghKLMB1lqUqhOGMDevUUGBwxMIBziMA+89gggpChzgvSJUEmsKtFc4JbC+QBiJ1CFCG4TXeCWR5QhHE8sIpSTCOqzXKGHxLkBGKcaEIEq0qGFtjsTjECA8woFDInFULfIIIVDe45THmervwnnsZHdBqQTelyhCvHc4KZHG4bVAIbE48AKPAaNASRAW70AoifMlWImUEuccHofzoFSAt+CFQaLw3oKsHmUhFM5YlFJYWyKUrvrNu0n7FF56Su9QQuCcwwoQXoIoUQiMU4jJMSlBofDOYUSJ84JQBxTGgyiQPkL4Ei8CnDNY7wjDEGs93lukCClsCl6iVIi1tqrTWoJAY5yv6lECrCcrHCqQeG8xpSSKYmb2Sb73kfsxruSf//rjL9ox+4m//5gfly3+4E/+mFatJHcSl0rGZUbvsmBtYxOXXyCsW2anZ8kBSY6SCaHQGNsjihWNSOJsRCgzpPPkwh
MFETpyXH52l/FowHR7iV53SL09jfQBxpdEYUgpASeY0hlDUYM8RXiFUg6vInxZIKXHZhlBs4UwlsCO2IkTBv0R3nkcEpRCpkN8fYrZmqU/MhQocI7SlVhjETIkVoJGLUfVaoBBSsAZHBLvHMgASYlH4pxEK/CmBCEpsiFhHKJthPVjlPIgEjJTUNcaC5RC3PqOMA4nNRKBjDxYjfc5WjewWZdiPCBszeK8Q1qPc+CKEVZFGG/xLidEYYoehHWks7h8hNcJbrzDI698OWfPX6E/AiEMZVhHj/oQCaRTSMAHdUajdcJoAUVKaS0qnkYoSzHYQumEuDnNIE3R0gMKLSW2LJFKEfiS3IItBgS1OWw5ZLh5lU4ngaBGd2dAECmi+gLCpJSFRPoMV0tQoqDo5dQOHKN/9TStRh3jLLFWKBGiYsnG1g71xhJaesZpD18Mmd43y6UL13jw/vu5sNbHj1MaoWAYtgGPdIbpZsBgt0cYJ2xurhKEDabbMd2+Q0cZR/Yvc/bCOjJukw530InHZhG2HKNDDX7MeFAQJnWaHXjoxAGiOOC97/+Hx6t67LHH/sHB9Hv/64ceO3rnK/DXPkVGk2jhTmb9FpuhIOqlLIcdsqbDd/v0utfotA6DzRmORky5MVo1GKW7uKRDGAT0VBvqCYtaYje3WDn19yzNz9OqLRMVQ2Zf8krWs4i2LEiijN2Boq00RkgeumMJsbHGbD1jtzZNnqeEq6cppudpkdA4dA/B9nXc4jLCN6GM6XrHVD7CdRr4UlFv12i4EXrnIhscwq+dpdGYZ+QLAl/Q1gqnHLXmElHQQ9oG37I0YH3XYLMUGScEFqRyFLYkCB3SKozWCBFRz4fMh5Zt3UVawdy+42zJhNmghggG1NFQQplljPIhevp2wkiDD4h1iBqtQg5+9wK9nRvUp47TrtVxq5cZqohk6RipnKLvRywfvxu/s4nWIZ39x1BmSEcmCCeoScvK5nP859/+MCdessh/+6qX8elPf5bbDsLuqEfhZliYmoJYkDuD7BxkumlRzeP0d67TFAaV1KhNR4xFjTvVBeTcfWxcu8xUvUHgLFdWt1DjHawOWL16hkAJkqZh/53fwdzwWZ45f462KOks3kuj1mLf4Zdw8ern2X94P0qXJF4xuNRn9vA8cvcsIVOs9M7RkTlf+OqTfPXMU6xc2OWtP/B9XGOKVncV7TcIdcj5557jIx/+E77ttQ/xV3/7KdKdVczUAUw4xfj6VfZ32nzh6aucPXuJZ774UX7o7d/P2pbjrtuOcOb8RZrpgPVLG1zYKuh2d+gkY2Z0j+tOwVCyuvolMDW89fR6Jc2GQBDR6/d45zt/+pe/edPVNx/z8yce+9uPf5wPf/C3+cCH/ohI5hjjkNkM//3bfpVfe/+P8ObvfTv9nWM8/rlVnDxPksQVsREFXhnqiWS63kD4EhVLXChYvdJn5bxj7TpIFyFchBOSQEmkEAjpQTikVEgpkQiEkoRSEUqN0gIhJFoHOCkIlCTUAVpJnttcYXPQZTRKkUoCAi9AKkkuHKEQGFeAN2RZRlkYms0aMzMJMwueZsvjSUEqlJA4AcYYQhWCBIlHCoUDFIBwCMBLWRE+6RF4pBRYYwgU4KEQDuUcKAUOhJRo6THegABNhBMWZ0FKj/fgBFgBEofwgIRbW117kELiAWf95FrB++r9rLUGURFILRXGO6QK8M7i8FW/eBCS6t+Fw3tfndN7PB4x+bm5v7YChKiIgBACj6/IrWDyB5NzSrzzCCGxzlXtFVWbS+exOEQQ4KwFIfBOABKBRniBr6bmyaJEYIVHIlAIlJQYa5EBWBMglcULiaPqH4HAmKpOISSlKVAqQgiJMeWtczJp8qRZKATCgxMKKRW4qn4pNfP7mhxfnsYLwUPf8bYX7Zh91be+4bHnLlzlXT/1Vj72yceJ4ghlc0rT4o2v+UH++Tu/k5d96+sY78xy6ukb6LBLGGiMLdEiBy3QytCsNVFYXBTgIk
26lbJ1OWP18ha1oEEgGwjnqE3NUDhFIC1KGqxVhFKBkHTaCUFRUA8ENgzx3hOUY0wUEUtNXJ8iMDnX8xG9NCdPDUY
"text/plain": [
"<Figure size 864x288 with 3 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"# Mixup illustration: load one image from each of two Imagenette class folders,\n",
"# scale pixel values to [0,1], and display the two originals plus their linear blend\n",
"church = PILImage.create(get_image_files_sorted(path/'train'/'n03028079')[0])\n",
"gas = PILImage.create(get_image_files_sorted(path/'train'/'n03425413')[0])\n",
"church = church.resize((256,256))\n",
"gas = gas.resize((256,256))\n",
"tchurch = tensor(church).float() / 255.\n",
"tgas = tensor(gas).float() / 255.\n",
"\n",
"_,axs = plt.subplots(1, 3, figsize=(12,4))\n",
"show_image(tchurch, ax=axs[0]);\n",
"show_image(tgas, ax=axs[1]);\n",
"# Third panel is the mixup of the pair: 30% church + 70% gas station\n",
"show_image((0.3*tchurch + 0.7*tgas), ax=axs[2]);"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Label smoothing"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Sidebar: Label smoothing, the paper"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### End sidebar"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Conclusion"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Questionnaire"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Further research\n",
"\n",
"1. Use the fastai documentation to build a function that crops an image to a square in the four corners, then implement a TTA method that averages the predictions on a center crop and those four crops. Did it help? Is it better than the TTA method of fastai?\n",
"1. Find the Mixup paper on arXiv and read it. Pick one or two more recent articles introducing variants of Mixup and read them, then try to implement them on your problem.\n",
"1. Find the script training Imagenette using Mixup and use it as an example to build a script for a long training on your own project. Execute it and see if it helped.\n",
"1. Read the sidebar on the math of label smoothing, and look at the relevant section of the original paper, and see if you can follow it. Don't be afraid to ask for help!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"split_at_heading": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}