From 3acc79d59a5d345b40325b843fcf44b408aff2a1 Mon Sep 17 00:00:00 2001
From: Sylvain Gugger
Date: Wed, 20 May 2020 08:19:05 -0700
Subject: [PATCH] Last updates

---
 12_nlp_dive.ipynb        | 12 ++++++------
 16_accel_sgd.ipynb       |  4 ++--
 app_blog.ipynb           |  2 +-
 clean/12_nlp_dive.ipynb  | 12 ++++++------
 clean/16_accel_sgd.ipynb |  2 +-
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/12_nlp_dive.ipynb b/12_nlp_dive.ipynb
index 607ec13..7be1f7d 100644
--- a/12_nlp_dive.ipynb
+++ b/12_nlp_dive.ipynb
@@ -975,7 +975,7 @@
    ],
    "source": [
     "learn = Learner(dls, LMModel3(len(vocab), 64), loss_func=F.cross_entropy,\n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(10, 3e-3)"
    ]
   },
@@ -1250,7 +1250,7 @@
    ],
    "source": [
     "learn = Learner(dls, LMModel4(len(vocab), 64), loss_func=loss_func,\n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 3e-3)"
    ]
   },
@@ -1478,7 +1478,7 @@
    "source": [
     "learn = Learner(dls, LMModel5(len(vocab), 64, 2), \n",
     "                loss_func=CrossEntropyLossFlat(), \n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 3e-3)"
    ]
   },
@@ -1871,7 +1871,7 @@
    "source": [
     "learn = Learner(dls, LMModel6(len(vocab), 64, 2), \n",
     "                loss_func=CrossEntropyLossFlat(), \n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 1e-2)"
    ]
   },
@@ -2065,7 +2065,7 @@
    "source": [
     "learn = Learner(dls, LMModel7(len(vocab), 64, 2, 0.5),\n",
     "                loss_func=CrossEntropyLossFlat(), metrics=accuracy,\n",
-    "                cbs=[ModelReseter, RNNRegularizer(alpha=2, beta=1)])"
+    "                cbs=[ModelResetter, RNNRegularizer(alpha=2, beta=1)])"
    ]
   },
  {
@@ -2285,7 +2285,7 @@
    "1. Why can maintaining the hidden state in an RNN lead to memory and performance problems? How do we fix this problem?\n",
    "1. What is \"BPTT\"?\n",
    "1. Write code to print out the first few batches of the validation set, including converting the token IDs back into English strings, as we showed for batches of IMDb data in <>.\n",
-    "1. What does the `ModelReseter` callback do? Why do we need it?\n",
+    "1. What does the `ModelResetter` callback do? Why do we need it?\n",
    "1. What are the downsides of predicting just one output word for each three input words?\n",
    "1. Why do we need a custom loss function for `LMModel4`?\n",
    "1. Why is the training of `LMModel4` unstable?\n",
diff --git a/16_accel_sgd.ipynb b/16_accel_sgd.ipynb
index 447c0af..0b7863d 100644
--- a/16_accel_sgd.ipynb
+++ b/16_accel_sgd.ipynb
@@ -1037,7 +1037,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Let's take a look at an example. Do you recall how in <> we needed to ensure that our special `reset` method was called at the start of training and validation for each epoch? We used the `ModelReseter` callback provided by fastai to do this for us. But how does it owrk? Here's the full source code for that class:"
+    "Let's take a look at an example. Do you recall how in <> we needed to ensure that our special `reset` method was called at the start of training and validation for each epoch? We used the `ModelResetter` callback provided by fastai to do this for us. But how does it work? Here's the full source code for that class:"
    ]
   },
  {
@@ -1046,7 +1046,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "class ModelReseter(Callback):\n",
+    "class ModelResetter(Callback):\n",
     "    def begin_train(self): self.model.reset()\n",
     "    def begin_validate(self): self.model.reset()"
    ]
diff --git a/app_blog.ipynb b/app_blog.ipynb
index d5b28a6..de0c290 100644
--- a/app_blog.ipynb
+++ b/app_blog.ipynb
@@ -49,7 +49,7 @@
    "source": [
     "A great solution is to host your blog on a platform called [GitHub Pages](https://pages.github.com/), which is free, has no ads or pay wall, and makes your data available in a standard way such that you can at any time move your blog to another host. But all the approaches I’ve seen to using GitHub Pages have required knowledge of the command line and arcane tools that only software developers are likely to be familiar with. For instance, GitHub's [own documentation](https://help.github.com/en/github/working-with-github-pages/creating-a-github-pages-site-with-jekyll) on setting up a blog includes a long list of instructions that involve installing the Ruby programming language, using the `git` command-line tool, copying over version numbers, and more—17 steps in total!\n",
     "\n",
-    "To cut down the hassle, weve created an easy approach that allows you to use an *entirely browser-based interface* for all your blogging needs. You will be up and running with your new blog within about five minutes. It doesn’t cost anything, and you can easily add your own custom domain to it if you wish to. In this section, we'll explain how to do it, using a template we've created called *fast\_template*. (NB: be sure to check the [book's website](https://book.fast.ai) for the latest blog recommendations, since new tools are always coming out; for instance, we're currently working with GitHub on creating a new tool called `fastpages` that is a more advanced version of `fast_template` particularly designed for people using Jupyter notebooks)."
+    "To cut down the hassle, we've created an easy approach that allows you to use an *entirely browser-based interface* for all your blogging needs. You will be up and running with your new blog within about five minutes. It doesn’t cost anything, and you can easily add your own custom domain to it if you wish to. In this section, we'll explain how to do it, using a template we've created called *fast\_template*. (NB: be sure to check the [book's website](https://book.fast.ai) for the latest blog recommendations, since new tools are always coming out)."
    ]
   },
  {
diff --git a/clean/12_nlp_dive.ipynb b/clean/12_nlp_dive.ipynb
index ba96f27..13c8210 100644
--- a/clean/12_nlp_dive.ipynb
+++ b/clean/12_nlp_dive.ipynb
@@ -626,7 +626,7 @@
    ],
    "source": [
     "learn = Learner(dls, LMModel3(len(vocab), 64), loss_func=F.cross_entropy,\n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(10, 3e-3)"
    ]
   },
@@ -845,7 +845,7 @@
    ],
    "source": [
     "learn = Learner(dls, LMModel4(len(vocab), 64), loss_func=loss_func,\n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 3e-3)"
    ]
   },
@@ -1022,7 +1022,7 @@
    "source": [
     "learn = Learner(dls, LMModel5(len(vocab), 64, 2), \n",
     "                loss_func=CrossEntropyLossFlat(), \n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 3e-3)"
    ]
   },
@@ -1303,7 +1303,7 @@
    "source": [
     "learn = Learner(dls, LMModel6(len(vocab), 64, 2), \n",
     "                loss_func=CrossEntropyLossFlat(), \n",
-    "                metrics=accuracy, cbs=ModelReseter)\n",
+    "                metrics=accuracy, cbs=ModelResetter)\n",
     "learn.fit_one_cycle(15, 1e-2)"
    ]
   },
@@ -1382,7 +1382,7 @@
    "source": [
     "learn = Learner(dls, LMModel7(len(vocab), 64, 2, 0.5),\n",
     "                loss_func=CrossEntropyLossFlat(), metrics=accuracy,\n",
-    "                cbs=[ModelReseter, RNNRegularizer(alpha=2, beta=1)])"
+    "                cbs=[ModelResetter, RNNRegularizer(alpha=2, beta=1)])"
    ]
   },
  {
@@ -1565,7 +1565,7 @@
    "1. Why can maintaining the hidden state in an RNN lead to memory and performance problems? How do we fix this problem?\n",
    "1. What is \"BPTT\"?\n",
    "1. Write code to print out the first few batches of the validation set, including converting the token IDs back into English strings, as we showed for batches of IMDb data in <>.\n",
-    "1. What does the `ModelReseter` callback do? Why do we need it?\n",
+    "1. What does the `ModelResetter` callback do? Why do we need it?\n",
    "1. What are the downsides of predicting just one output word for each three input words?\n",
    "1. Why do we need a custom loss function for `LMModel4`?\n",
    "1. Why is the training of `LMModel4` unstable?\n",
diff --git a/clean/16_accel_sgd.ipynb b/clean/16_accel_sgd.ipynb
index d71d5af..9bf2b01 100644
--- a/clean/16_accel_sgd.ipynb
+++ b/clean/16_accel_sgd.ipynb
@@ -614,7 +614,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "class ModelReseter(Callback):\n",
+    "class ModelResetter(Callback):\n",
     "    def begin_train(self): self.model.reset()\n",
     "    def begin_validate(self): self.model.reset()"
    ]
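--
For reference, a minimal sketch of the renamed callback in use once this patch is applied. It assumes the `dls`, `vocab`, and `LMModel3` definitions from 12_nlp_dive.ipynb are in scope and fastai imported as in the book's notebooks; it is illustrative only, not part of the diff:

    # ModelResetter calls the model's `reset` method when training and
    # validation begin, so hidden state never leaks between phases.
    class ModelResetter(Callback):
        def begin_train(self):    self.model.reset()
        def begin_validate(self): self.model.reset()

    # Pass the callback to the Learner via `cbs`; fastai accepts the class
    # itself and instantiates it for you.
    learn = Learner(dls, LMModel3(len(vocab), 64), loss_func=F.cross_entropy,
                    metrics=accuracy, cbs=ModelResetter)
    learn.fit_one_cycle(10, 3e-3)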