{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab\n",
"import fastbook\n",
"fastbook.setup_book()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"from fastbook import *"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# The Training Process"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Establishing a Baseline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_data(url, presize, resize):\n",
"    path = untar_data(url)\n",
"    return DataBlock(\n",
"        blocks=(ImageBlock, CategoryBlock), get_items=get_image_files,\n",
"        splitter=GrandparentSplitter(valid_name='val'),\n",
"        get_y=parent_label, item_tfms=Resize(presize),\n",
"        batch_tfms=[*aug_transforms(min_scale=0.5, size=resize),\n",
"                    Normalize.from_stats(*imagenet_stats)],\n",
"    ).dataloaders(path, bs=128)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dls = get_data(URLs.IMAGENETTE_160, 160, 128)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_learner(**kwargs):\n",
"    return vision_learner(dls, resnet34, pretrained=False,\n",
"                          metrics=accuracy, **kwargs).to_fp16()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn = get_learner()\n",
"learn.fit_one_cycle(3, 0.003)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn = get_learner(opt_func=SGD)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn.lr_find()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn.fit_one_cycle(3, 0.03, moms=(0,0,0))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## A Generic Optimizer"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def sgd_cb(p, lr, **kwargs): p.data.add_(-lr, p.grad.data)"
|
|
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"opt_func = partial(Optimizer, cbs=[sgd_cb])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn = get_learner(opt_func=opt_func)\n",
"learn.fit(3, 0.03)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Momentum"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = np.linspace(-4, 4, 100)\n",
"y = 1 - (x/3) ** 2\n",
"x1 = x + np.random.randn(100) * 0.1\n",
"y1 = y + np.random.randn(100) * 0.1\n",
"plt.scatter(x1,y1)\n",
"idx = x1.argsort()\n",
"beta,avg,res = 0.7,0,[]\n",
"for i in idx:\n",
|
|
" avg = beta * avg + (1-beta) * y1[i]\n",
|
|
" res.append(avg/(1-beta**(i+1)))\n",
|
|
"plt.plot(x1[idx],np.array(res), color='red');"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"x = np.linspace(-4, 4, 100)\n",
|
|
"y = 1 - (x/3) ** 2\n",
|
|
"x1 = x + np.random.randn(100) * 0.1\n",
|
|
"y1 = y + np.random.randn(100) * 0.1\n",
|
|
"_,axs = plt.subplots(2,2, figsize=(12,8))\n",
|
|
"betas = [0.5,0.7,0.9,0.99]\n",
|
|
"idx = x1.argsort()\n",
|
|
"for beta,ax in zip(betas, axs.flatten()):\n",
|
|
" ax.scatter(x1,y1)\n",
|
|
" avg,res = 0,[]\n",
|
|
" for i in idx:\n",
|
|
" avg = beta * avg + (1-beta) * y1[i]\n",
|
|
" res.append(avg)#/(1-beta**(i+1)))\n",
|
|
" ax.plot(x1[idx],np.array(res), color='red');\n",
|
|
" ax.set_title(f'beta={beta}')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def average_grad(p, mom, grad_avg=None, **kwargs):\n",
|
|
" if grad_avg is None: grad_avg = torch.zeros_like(p.grad.data)\n",
|
|
" return {'grad_avg': grad_avg*mom + p.grad.data}"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def momentum_step(p, lr, grad_avg, **kwargs): p.data.add_(-lr, grad_avg)"
|
|
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"opt_func = partial(Optimizer, cbs=[average_grad,momentum_step], mom=0.9)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn = get_learner(opt_func=opt_func)\n",
"learn.fit_one_cycle(3, 0.03)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn.recorder.plot_sched()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## RMSProp"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def average_sqr_grad(p, sqr_mom, sqr_avg=None, **kwargs):\n",
"    if sqr_avg is None: sqr_avg = torch.zeros_like(p.grad.data)\n",
"    return {'sqr_avg': sqr_mom*sqr_avg + (1-sqr_mom)*p.grad.data**2}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def rms_prop_step(p, lr, sqr_avg, eps, grad_avg=None, **kwargs):\n",
|
|
" denom = sqr_avg.sqrt().add_(eps)\n",
|
|
" p.data.addcdiv_(-lr, p.grad, denom)\n",
|
|
"\n",
|
|
"opt_func = partial(Optimizer, cbs=[average_sqr_grad,rms_prop_step],\n",
|
|
" sqr_mom=0.99, eps=1e-7)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"learn = get_learner(opt_func=opt_func)\n",
|
|
"learn.fit_one_cycle(3, 0.003)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Adam"
|
|
]
|
|
},
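{
"cell_type": "markdown",
"metadata": {},
"source": [
"Adam combines the two ideas above: it steps in the direction of the moving average of the gradients (momentum) and divides by the square root of the moving average of the squared gradients (RMSProp), debiasing both averages to correct for their zero initialization. Here is a minimal sketch of an Adam step built from our callbacks; `step_counter` and `adam_step` are illustrative names, not fastai's built-ins (in practice you would simply use fastai's default `Adam`). Since our `average_grad` skips the `(1-mom)` dampening factor, we multiply it back in when debiasing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def step_counter(p, step=0, **kwargs):\n",
"    # count how many updates this parameter has received (needed for debiasing)\n",
"    return {'step': step+1}\n",
"\n",
"def adam_step(p, lr, mom, sqr_mom, grad_avg, sqr_avg, step, eps, **kwargs):\n",
"    grad_hat = grad_avg * (1-mom) / (1 - mom**step)  # debiased momentum average\n",
"    sqr_hat = sqr_avg / (1 - sqr_mom**step)          # debiased squared average\n",
"    p.data.addcdiv_(grad_hat, sqr_hat.sqrt().add_(eps), value=-lr)\n",
"\n",
"opt_func = partial(Optimizer, cbs=[average_grad,average_sqr_grad,step_counter,adam_step],\n",
"                   mom=0.9, sqr_mom=0.99, eps=1e-5)"
]
},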
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Decoupled Weight Decay"
]
},
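{
"cell_type": "markdown",
"metadata": {},
"source": [
"Weight decay can be applied in two ways: added to the gradients as L2 regularization (which is equivalent for plain SGD), or subtracted directly from the weights before the gradient step, decoupled from any adaptive statistics, as in AdamW. Below is a minimal sketch of the decoupled form as one more optimizer callback; `wd_step` is an illustrative name, and in practice fastai applies weight decay for you via the `wd` argument."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def wd_step(p, lr, wd, **kwargs):\n",
"    # decoupled weight decay: shrink the weights directly, independently\n",
"    # of the gradient statistics used by the optimizer step\n",
"    p.data.mul_(1 - lr*wd)\n",
"\n",
"opt_func = partial(Optimizer, cbs=[wd_step,average_grad,momentum_step],\n",
"                   mom=0.9, wd=0.01)"
]
},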
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Callbacks"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Creating a Callback"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class ModelResetter(Callback):\n",
" def begin_train(self): self.model.reset()\n",
|
|
" def begin_validate(self): self.model.reset()"
|
|
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class RNNRegularizer(Callback):\n",
"    def __init__(self, alpha=0., beta=0.): self.alpha,self.beta = alpha,beta\n",
"\n",
"    def after_pred(self):\n",
"        self.raw_out,self.out = self.pred[1],self.pred[2]\n",
"        self.learn.pred = self.pred[0]\n",
"\n",
"    def after_loss(self):\n",
"        if not self.training: return\n",
"        if self.alpha != 0.:\n",
"            self.learn.loss += self.alpha * self.out[-1].float().pow(2).mean()\n",
"        if self.beta != 0.:\n",
"            h = self.raw_out[-1]\n",
"            if len(h)>1:\n",
"                self.learn.loss += self.beta * (h[:,1:] - h[:,:-1]\n",
"                                               ).float().pow(2).mean()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Callback Ordering and Exceptions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class TerminateOnNaNCallback(Callback):\n",
"    run_before=Recorder\n",
"    def after_batch(self):\n",
"        if torch.isinf(self.loss) or torch.isnan(self.loss):\n",
"            raise CancelFitException"
]
},
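{
"cell_type": "markdown",
"metadata": {},
"source": [
"To try a callback like this for a single training run, we can pass an instance through the `cbs` parameter of `fit` (or attach it permanently with `Learner.add_cb`). A short usage sketch, with a deliberately huge learning rate so the loss blows up and our callback cancels training:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn = get_learner()\n",
"# the absurd learning rate makes the loss become inf/nan almost immediately,\n",
"# so TerminateOnNaNCallback raises CancelFitException and stops the fit\n",
"learn.fit(3, lr=1e8, cbs=TerminateOnNaNCallback())"
]
},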
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Conclusion"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Questionnaire"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"1. What is the equation for a step of SGD, in math or code (as you prefer)?\n",
"1. What do we pass to `vision_learner` to use a non-default optimizer?\n",
"1. What are optimizer callbacks?\n",
"1. What does `zero_grad` do in an optimizer?\n",
"1. What does `step` do in an optimizer? How is it implemented in the general optimizer?\n",
"1. Rewrite `sgd_cb` to use the `+=` operator, instead of `add_`.\n",
"1. What is \"momentum\"? Write out the equation.\n",
"1. What's a physical analogy for momentum? How does it apply in our model training settings?\n",
"1. What does a bigger value for momentum do to the gradients?\n",
"1. What are the default values of momentum for 1cycle training?\n",
"1. What is RMSProp? Write out the equation.\n",
"1. What do the squared values of the gradients indicate?\n",
"1. How does Adam differ from momentum and RMSProp?\n",
"1. Write out the equation for Adam.\n",
"1. Calculate the values of `unbias_avg` and `w.avg` for a few batches of dummy values.\n",
"1. What's the impact of having a high `eps` in Adam?\n",
"1. Read through the optimizer notebook in fastai's repo, and execute it.\n",
"1. In what situations do dynamic learning rate methods like Adam change the behavior of weight decay?\n",
"1. What are the four steps of a training loop?\n",
"1. Why is using callbacks better than writing a new training loop for each tweak you want to add?\n",
"1. What aspects of the design of fastai's callback system make it as flexible as copying and pasting bits of code?\n",
"1. How can you get the list of events available to you when writing a callback?\n",
"1. Write the `ModelResetter` callback (without peeking).\n",
"1. How can you access the necessary attributes of the training loop inside a callback? When can you use or not use the shortcuts that go with them?\n",
"1. How can a callback influence the control flow of the training loop?\n",
"1. Write the `TerminateOnNaN` callback (without peeking, if possible).\n",
"1. How do you make sure your callback runs after or before another callback?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Further Research"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"1. Look up the \"Rectified Adam\" paper, implement it using the general optimizer framework, and try it out. Search for other recent optimizers that work well in practice, and pick one to implement.\n",
"1. Look at the mixed-precision callback with the documentation. Try to understand what each event and line of code does.\n",
"1. Implement your own version of the learning rate finder from scratch. Compare it with fastai's version.\n",
"1. Look at the source code of the callbacks that ship with fastai. See if you can find one that's similar to what you're looking to do, to get some inspiration."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Foundations of Deep Learning: Wrap up"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Congratulations, you have made it to the end of the \"foundations of deep learning\" section of the book! You now understand how all of fastai's applications and most important architectures are built, and the recommended ways to train them—and you have all the information you need to build these from scratch. While you probably won't need to create your own training loop, or batchnorm layer, for instance, knowing what is going on behind the scenes is very helpful for debugging, profiling, and deploying your solutions.\n",
"\n",
"Since you understand the foundations of fastai's applications now, be sure to spend some time digging through the source notebooks and running and experimenting with parts of them. This will give you a better idea of how everything in fastai is developed.\n",
"\n",
"In the next section, we will be looking even further under the covers: we'll explore how the actual forward and backward passes of a neural network are done, and we will see what tools are at our disposal to get better performance. We will then continue with a project that brings together all the material in the book, which we will use to build a tool for interpreting convolutional neural networks. Last but not least, we'll finish by building fastai's `Learner` class from scratch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"split_at_heading": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}