From dbd46e99fe4423ff72e6ee7b59244be0cfa81223 Mon Sep 17 00:00:00 2001 From: oscarpocock Date: Mon, 28 Mar 2022 16:46:12 +0100 Subject: [PATCH] Added Weeks 4-8 --- .gitattributes | 1 + content/posts/week-4.md | 10 + content/posts/week-5.md | 12 + content/posts/week-6.md | 16 + content/posts/week-7.md | 16 + content/posts/week-8.md | 30 + public/20-epoch-plot.png | 3 + public/2000-epoch-plot.png | 3 + public/404.html | 2 +- public/categories/index.html | 6 +- public/categories/page/1/index.html | 11 +- public/docs/developer/index.html | 6 +- public/docs/index.html | 6 +- public/docs/user/index.html | 6 +- ...4e8038d26de63339156f8be996cb82fdd8a42dd.js | 1 + ...caee4fd59e7c35b67e065778352b0d790f6b7e6.js | 1 + ...40fe7159f145a30edf28a0847cab90265bd241a.js | 1 + ...52e2046e26585a3339c256e9032788237488197.js | 1 + public/favicon.png | Bin 109 -> 128 bytes public/index.html | 6 +- public/index.xml | 47 +- public/posts/index.html | 105 +- public/posts/index.xml | 47 +- public/posts/page/1/index.html | 11 +- public/posts/week-1/index.html | 6 +- public/posts/week-2/index.html | 108 +- public/posts/week-3/index.html | 76 +- public/posts/week-4/index.html | 65 +- public/posts/week-5/index.html | 296 + public/posts/week-6/index.html | 322 + public/posts/week-7/index.html | 274 + public/posts/week-8/index.html | 329 + public/predictions.png | 3 + public/project-model.svg | 3962 ++++++++++ public/sitemap.xml | 18 +- public/tags/index.html | 6 +- public/tags/page/1/index.html | 11 +- public/transfer-cnn-arch-simpl.svg | 2761 +++++++ public/transfer-cnn-arch.svg | 6675 +++++++++++++++++ static/20-epoch-plot.png | 3 + static/2000-epoch-plot.png | 3 + static/predictions.png | 3 + static/project-model.svg | 3962 ++++++++++ static/transfer-cnn-arch-simpl.svg | 2761 +++++++ static/transfer-cnn-arch.svg | 6675 +++++++++++++++++ 45 files changed, 28499 insertions(+), 168 deletions(-) create mode 100644 .gitattributes create mode 100644 content/posts/week-4.md create mode 100644 
content/posts/week-5.md create mode 100644 content/posts/week-6.md create mode 100644 content/posts/week-7.md create mode 100644 content/posts/week-8.md create mode 100644 public/20-epoch-plot.png create mode 100644 public/2000-epoch-plot.png create mode 100644 public/en.search-data.min.3cdd632708ab5b87a1949e77a4e8038d26de63339156f8be996cb82fdd8a42dd.js create mode 100644 public/en.search-data.min.7fa160935458996a6e342c79dcaee4fd59e7c35b67e065778352b0d790f6b7e6.js create mode 100644 public/en.search.min.6bd18a0f048eff9ca273fd5ac40fe7159f145a30edf28a0847cab90265bd241a.js create mode 100644 public/en.search.min.85ef3e958a519745646bb364552e2046e26585a3339c256e9032788237488197.js create mode 100644 public/posts/week-5/index.html create mode 100644 public/posts/week-6/index.html create mode 100644 public/posts/week-7/index.html create mode 100644 public/posts/week-8/index.html create mode 100644 public/predictions.png create mode 100644 public/project-model.svg create mode 100644 public/transfer-cnn-arch-simpl.svg create mode 100644 public/transfer-cnn-arch.svg create mode 100644 static/20-epoch-plot.png create mode 100644 static/2000-epoch-plot.png create mode 100644 static/predictions.png create mode 100644 static/project-model.svg create mode 100644 static/transfer-cnn-arch-simpl.svg create mode 100644 static/transfer-cnn-arch.svg diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..24a8e87 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.png filter=lfs diff=lfs merge=lfs -text diff --git a/content/posts/week-4.md b/content/posts/week-4.md new file mode 100644 index 0000000..c98f8b3 --- /dev/null +++ b/content/posts/week-4.md @@ -0,0 +1,10 @@ +--- +title: "Week 4" +date: 2022-02-27T12:46:59Z +draft: false +--- +This week I did some research into how to build a CNN from scratch, including the different type of layers, loss functions, learning rates, epochs and other core concepts.[^1][^2] +[^1]: deeplizard. 
"Convolutional Neural Networks (CNNs) explained." (Dec. 9, 2017). Accessed: Feb. 22, 2022. [Online Video]. Available: https://youtube.com/watch?v=YRhxdVk_sIs +[^2]: A Rosebrock. "PyTorch: Training your first Convolutional Neural Network (CNN)". pyimagesearch.com. https://pyimagesearch.com/2021/07/19/pytorch-training-your-first-convolutional-neural-network-cnn/ (accessed Feb. 22, 2022.) + +I also set up and created this blog with [Hugo](https://gohugo.io/) to document my progress and setup [Woodpecker CI](https://woodpecker-ci.org/) to do continuous testing and integration. diff --git a/content/posts/week-5.md b/content/posts/week-5.md new file mode 100644 index 0000000..10b2c3e --- /dev/null +++ b/content/posts/week-5.md @@ -0,0 +1,12 @@ +--- +title: "Week 5" +date: 2022-03-06T12:40:14+01:00 +draft: false +--- +# Starting to write a CNN +This week I started to implement what I had learnt about CNNs in [Week 4](../week-4/). At this point I hadn't designed a CNN architecture to implement, instead I wanted to have a running model regardless of performance just to see if I could implement one and understand it. + +Half way through implementation, I decided to look back at the existing research papers on judging aesthetic judgement to see which aspects of their system and CNN were important to the task. While reading I saw that nearly all research papers on the topic used transfer learning instead of creating their own CNNs from scratch. At this point I stopped writing a CNN from scratch and decided to solve my problem using transfer learning instead. I followed a guide[^1] on implementing transfer learning in pytorch and another guide[^2][^3] on creating a regressive CNN model. +[^1]:A Rosebrock. "PyTorch: Transfer Learning and Image Classification". pyimagesearch.com. https://pyimagesearch.com/2021/10/11/pytorch-transfer-learning-and-image-classification/ (accessed Mar. 1, 2022.) +[^2]:A Rosebrock. "Regression with Keras". pyimagesearch.com. 
https://pyimagesearch.com/2019/01/21/regression-with-keras/ (accessed Feb. 22, 2022.) +[^3]:A Rosebrock. "Keras, Regression, and CNNs". pyimagesearch.com. https://pyimagesearch.com/2019/01/28/keras-regression-and-cnns/ (accessed Feb. 22, 2022.) diff --git a/content/posts/week-6.md b/content/posts/week-6.md new file mode 100644 index 0000000..453cd3c --- /dev/null +++ b/content/posts/week-6.md @@ -0,0 +1,16 @@ +--- +title: "Week 6" +date: 2022-03-13T12:40:17+01:00 +draft: false +--- + +This week I finished programming the basic CNN model using transfer learning. I decided to train it for 20 epochs to make sure there weren't any runtime errors in my code. As I don't own an Nvidia GPU (I have an AMD GPU), I couldn't make use of the pytorch version that utilised CUDA to speed up processing. There is a RocM version of pytorch for AMD GPUs[^1] but RocM isn't as mature as CUDA and only officially supports a small subset of Linux distributions. Therefore, for this model I trained on the CPU for only 20 epochs just to make sure it ran successfully before trying to find some Nvidia compute. +[^1]: https://pytorch.org/blog/pytorch-for-amd-rocm-platform-now-available-as-python-package/ + +## Transfer learning model architecture +### Full +![Transfer learning architecture](/transfer-cnn-arch.svg) +### Simplified +![Simplified Transfer learning architecture](/transfer-cnn-arch-simpl.svg) +## Validation and train loss over 20 epochs +![plot of training and validation loss over 20 epochs](/20-epoch-plot.png) diff --git a/content/posts/week-7.md b/content/posts/week-7.md new file mode 100644 index 0000000..755125a --- /dev/null +++ b/content/posts/week-7.md @@ -0,0 +1,16 @@ +--- +title: "Week 7" +date: 2022-03-20T12:40:18+01:00 +draft: false +--- +Now that I had successfully run my model without any runtime errors, the next step this week was finding some GPU compute so I can train my model on much more powerful hardware to accelerate the training. 
+ +My first idea was to use cloud computing. There are machine learning specific cloud technologies, but I didn't want to use these as I didn't want my code to be dependent on the specific ways cloud platforms want the code in. Instead, I wanted to get a general VM with an attached GPU where I could run my workloads manually. I had already written docker images that contained all the dependencies of my code that I could deploy to these VMs to ensure a reproducible and portable environment. + +First place I looked was [Linode](https://www.linode.com/products/gpu/). Although, after I contacted their support about it they said I needed at least $100 of transactions on my account in order to request access to their GPU instances. They also noted I could make a deposit of $100 to start using them straight away. I wasn't sure if my model was going to use up $100 to train yet so I didn't want to risk it. + +I then looked to [Microsoft's Azure](https://azure.microsoft.com). I had used Azure during my industrial year and had previously passed the fundamentals and associate administrator exams for the platform so felt fairly confident in using it for my project. I ran into some issues I couldn't quite explain at the start of the week. For some reason no services were available for me to use. I couldn't use any VMs, create any networks or drives etc... Turns out I noticed that my UK account was defaulting to trying to create resources in the US which I didn't have access to. So I had to manually set my location to the UK in order to create any resources. + +While I was trying to work out the issue with Azure, I looked at using [GCP](https://cloud.google.com). GCP automatically sets new accounts to have a quota of 0 for GPUs. Meaning you can't attach one to any VM. You can increase the quota which requires getting in contact with customer support. Within 10 minutes I got a response and my quota was increased by 1. 
I wrote [Terraform](https://www.terraform.io/) IaC (Infrastructure as Code) to automatically spin up and spin down cloud resources quickly. + +At this point I realised my code wasn't very portable as it included hard-coded absolute paths among other things. I refactored a lot of my code in order to run it on any machine. I was also granted access to the university's GPU compute servers which allowed me to train my model without paying for cloud fees. To make sure my refactoring worked and that the GPU was actually being utilised during training, I ran the training script on the uni's GPU compute server for 20 epochs. It successfully finished in ~13 minutes. diff --git a/content/posts/week-8.md b/content/posts/week-8.md new file mode 100644 index 0000000..faeaf36 --- /dev/null +++ b/content/posts/week-8.md @@ -0,0 +1,30 @@ +--- +title: "Week 8" +date: 2022-03-27T13:13:48+01:00 +draft: false +--- +# Monday +Towards the end of last week ([Week 7](../week-7)), I managed to refactor my code in order to make it more portable. This allowed me to train my model on different machines. I ran my training script on the uni's GPU compute successfully for 20 epochs. The next stage was to train it for longer and analyse the results. On the Monday morning I adjusted the parameters of my training script to train for 2000 epochs instead. + +# Tuesday +Tuesday afternoon the training had finished and I had a model that was trained on 2000 epochs. This gave me a day to analyse the results and do some rough predictions before my mid-project demo on the Wednesday. +## Training and validation loss graphs +![training and validation loss 20 epochs](/20-epoch-plot.png) +![training and validation loss 2000 epochs](/2000-epoch-plot.png) + +As we can see from the 2000 Epochs graph, the loss seems to plateau at around 60 epochs. The training loss seems to even out with more accuracy than the validation loss. This means that our data isn't fully learning what I want it to. 
Also it's overfitting slightly as it's better at predicting the training set than the validation set. The variance in the validation set shows that the features it's decided to learn aren't the right features to confidently predict aesthetics in this dataset. + +For the rest of the day I worked on my prediction script so I could use the model to predict new pictures. I also worked on my architecture diagrams and slides for the mid-project demo. + +![images with prediction scores](/predictions.png) + +Due to the nature of how I processed my images (resizing them to 32x32 and then saving them to a tensor then saving them to disk), my prediction script also displayed those down-sized images. This may have also affected the performance of the model. + +# Wednesday +I spent most of Wednesday morning finishing my slides, diagrams and making example predictions using the prediction script. + +![overall project architecture](/project-model.svg) + +# Rest of week +I spent the rest of the week looking at the project's overall pipeline including the non-machine learning filtering. I also started to implement basic focus detection by looking at blur detection using the Laplacian operator[^1]. 
+[^1]:https://pyimagesearch.com/2015/09/07/blur-detection-with-opencv/ diff --git a/public/20-epoch-plot.png b/public/20-epoch-plot.png new file mode 100644 index 0000000..fdbfa39 --- /dev/null +++ b/public/20-epoch-plot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72405c4fbd139e5018cdacdcd5edf614d2d418ef3313d6e5c1e9fb659be8a5b4 +size 37653 diff --git a/public/2000-epoch-plot.png b/public/2000-epoch-plot.png new file mode 100644 index 0000000..1002b80 --- /dev/null +++ b/public/2000-epoch-plot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91959decf7d6a2a7dec7c4f6b99eea7a4d8ef8dd504b290e516024b48d171e64 +size 33199 diff --git a/public/404.html b/public/404.html index 0bed217..b110836 100644 --- a/public/404.html +++ b/public/404.html @@ -14,7 +14,7 @@ - + + + + + + + + +
+ + +
+
+ +
+ + + Week 5 + + +
+ + + + + + +
+ + + +
+

+ Week 5 +

+ +
March 6, 2022
+ + + + + + + + + +

+ Starting to write a CNN + # +

+

This week I started to implement what I had learnt about CNNs in + Week 4. At this point I hadn’t designed a CNN architecture to implement, instead I wanted to have a running model regardless of performance just to see if I could implement one and understand it.

+

Half way through implementation, I decided to look back at the existing research papers on judging aesthetic judgement to see which aspects of their system and CNN were important to the task. While reading I saw that nearly all research papers on the topic used transfer learning instead of creating their own CNNs from scratch. At this point I stopped writing a CNN from scratch and decided to solve my problem using transfer learning instead. I followed a guide1 on implementing transfer learning in pytorch and another guide23 on creating a regressive CNN model.

+
+
+
    +
  1. +

    A Rosebrock. “PyTorch: Transfer Learning and Image Classification”. pyimagesearch.com. + https://pyimagesearch.com/2021/10/11/pytorch-transfer-learning-and-image-classification/ (accessed Mar. 1, 2022.) ↩︎

    +
  2. +
  3. +

    A Rosebrock. “Regression with Keras”. pyimagesearch.com. + https://pyimagesearch.com/2019/01/21/regression-with-keras/ (accessed Feb. 22, 2022.) ↩︎

    +
  4. +
  5. +

    A Rosebrock. “Keras, Regression, and CNNs”. pyimagesearch.com. + https://pyimagesearch.com/2019/01/28/keras-regression-and-cnns/ (accessed Feb. 22, 2022.) ↩︎

    +
  6. +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+ +
+ + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + diff --git a/public/posts/week-6/index.html b/public/posts/week-6/index.html new file mode 100644 index 0000000..33b3f81 --- /dev/null +++ b/public/posts/week-6/index.html @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + +Week 6 | MMP | Oscar Pocock + + + + + + + + + + + + + + +
+ + +
+
+ +
+ + + Week 6 + + +
+ + + + + + +
+ + + +
+

+ Week 6 +

+ +
March 13, 2022
+ + + + + + + + + +

This week I finished programming the basic CNN model using transfer learning. I decided to train it for 20 epochs to make sure there weren’t any runtime errors in my code. As I don’t own an Nvidia GPU (I have an AMD GPU), I couldn’t make use of the pytorch version that utilised CUDA to speed up processing. There is a RocM version of pytorch for AMD GPUs1 but RocM isn’t as mature as CUDA and only officially supports a small subset of Linux distributions. Therefore, for this model I trained on the CPU for only 20 epochs just to make sure it ran successfully before trying to find some Nvidia compute.

+

+ Transfer learning model architecture + # +

+

+ Full + # +

+

+ Transfer learning architecture

+

+ Simplified + # +

+

+ Simplified Transfer learning architecture

+

+ Validation and train loss over 20 epochs + # +

+

+ plot of training and validation loss over 20 epochs

+
+
+
    +
  1. +

    + https://pytorch.org/blog/pytorch-for-amd-rocm-platform-now-available-as-python-package/ ↩︎

    +
  2. +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+ +
+ + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + diff --git a/public/posts/week-7/index.html b/public/posts/week-7/index.html new file mode 100644 index 0000000..3466fb0 --- /dev/null +++ b/public/posts/week-7/index.html @@ -0,0 +1,274 @@ + + + + + + + + + + + + + + + +Week 7 | MMP | Oscar Pocock + + + + + + + + + + + + + + +
+ + +
+
+ +
+ + + Week 7 + + +
+ + + + + + +
+ + + +
+

+ Week 7 +

+ +
March 20, 2022
+ + + + + + + + + +

Now that I had successfully run my model without any runtime errors, the next step this week was finding some GPU compute so I can train my model on much more powerful hardware to accelerate the training.

+

My first idea was to use cloud computing. There are machine learning specific cloud technologies, but I didn’t want to use these as I didn’t want my code to be dependent on the specific ways cloud platforms want the code in. Instead, I wanted to get a general VM with an attached GPU where I could run my workloads manually. I had already written docker images that contained all the dependencies of my code that I could deploy to these VMs to ensure a reproducible and portable environment.

+

First place I looked was + Linode. Although, after I contacted their support about it they said I needed at least $100 of transactions on my account in order to request access to their GPU instances. They also noted I could make a deposit of $100 to start using them straight away. I wasn’t sure if my model was going to use up $100 to train yet so I didn’t want to risk it.

+

I then looked to + Microsoft’s Azure. I had used Azure during my industrial year and had previously passed the fundamentals and associate administrator exams for the platform so felt fairly confident in using it for my project. I ran into some issues I couldn’t quite explain at the start of the week. For some reason no services were available for me to use. I couldn’t use any VMs, create any networks or drives etc… Turns out I noticed that my UK account was defaulting to trying to create resources in the US which I didn’t have access to. So I had to manually set my location to the UK in order to create any resources.

+

While I was trying to work out the issue with Azure, I looked at using + GCP. GCP automatically sets new accounts to have a quota of 0 for GPUs. Meaning you can’t attach one to any VM. You can increase the quota which requires getting in contact with customer support. Within 10 minutes I got a response and my quota was increased by 1. I wrote + Terraform IaC (Infrastructure as Code) to automatically spin up and spin down cloud resources quickly.

+

At this point I realised my code wasn’t very portable as it included hard-coded absolute paths among other things. I refactored a lot of my code in order to run it on any machine. I was also granted access to the university’s GPU compute servers which allowed me to train my model without paying for cloud fees. To make sure my refactoring worked and that the GPU was actually being utilised during training, I ran the training script on the uni’s GPU compute server for 20 epochs. It successfully finished in ~13 minutes.

+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+ +
+ + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + diff --git a/public/posts/week-8/index.html b/public/posts/week-8/index.html new file mode 100644 index 0000000..e4de2f6 --- /dev/null +++ b/public/posts/week-8/index.html @@ -0,0 +1,329 @@ + + + + + + + + + + + + + + + +Week 8 | MMP | Oscar Pocock + + + + + + + + + + + + + + +
+ + +
+
+ +
+ + + Week 8 + + +
+ + + + + + +
+ + + +
+

+ Week 8 +

+ +
March 27, 2022
+ + + + + + + + + +

+ Monday + # +

+

Towards the end of last week ( + Week 7), I managed to refactor my code in order to make it more portable. This allowed me to train my model on different machines. I ran my training script on the uni’s GPU compute successfully for 20 epochs. The next stage was to train it for longer and analyse the results. On the Monday morning I adjusted the parameters of my training script to train for 2000 epochs instead.

+

+ Tuesday + # +

+

Tuesday afternoon the training had finished and I had a model that was trained on 2000 epochs. This gave me a day to analyse the results and do some rough predictions before my mid-project demo on the Wednesday.

+

+ Training and validation loss graphs + # +

+

+ training and validation loss 20 epochs + + training and validation loss 2000 epochs

+

As we can see from the 2000 Epochs graph, the loss seems to plateau at around 60 epochs. The training loss seems to even out with more accuracy than the validation loss. This means that our data isn’t fully learning what I want it to. Also it’s overfitting slightly as it’s better at predicting the training set than the validation set. The variance in the validation set shows that the features it’s decided to learn aren’t the right features to confidently predict aesthetics in this dataset.

+

For the rest of the day I worked on my prediction script so I could use the model to predict new pictures. I also worked on my architecture diagrams and slides for the mid-project demo.

+

+ images with prediction scores

+

Due to the nature of how I processed my images (resizing them to 32x32 and then saving them to a tensor then saving them to disk), my prediction script also displayed those down-sized images. This may have also affected the performance of the model.

+

+ Wednesday + # +

+

I spent most of Wednesday morning finishing my slides, diagrams and making example predictions using the prediction script.

+

+ overall project architecture

+

+ Rest of week + # +

+

I spent the rest of the week looking at the project’s overall pipeline including the non-machine learning filtering. I also started to implement basic focus detection by looking at blur detection using the Laplacian operator1.

+
+
+
    +
  1. +

    + https://pyimagesearch.com/2015/09/07/blur-detection-with-opencv/ ↩︎

    +
  2. +
+
+
+ + + +
+ +
+ + + + + +
+ + + +
+ + + +
+ +
+ + + + +
+ + + + +
+ + + + + + + + + + + + + + + + + diff --git a/public/predictions.png b/public/predictions.png new file mode 100644 index 0000000..1fac605 --- /dev/null +++ b/public/predictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe9049f11c40a76a23f223c1e0b3e40f0cf327f2d423685475d1b61e88ec9110 +size 58692 diff --git a/public/project-model.svg b/public/project-model.svg new file mode 100644 index 0000000..55d8d8b --- /dev/null +++ b/public/project-model.svg @@ -0,0 +1,3962 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/public/sitemap.xml b/public/sitemap.xml index bdce102..51ffc2a 100644 --- a/public/sitemap.xml +++ b/public/sitemap.xml @@ -7,13 +7,25 @@ https://mmp.oscar.blue/docs/user/ https://mmp.oscar.blue/posts/ - 2022-02-23T12:46:59+00:00 + 2022-03-27T13:13:48+01:00 https://mmp.oscar.blue/ - 2022-02-23T12:46:59+00:00 + 2022-03-27T13:13:48+01:00 + + https://mmp.oscar.blue/posts/week-8/ + 2022-03-27T13:13:48+01:00 + + https://mmp.oscar.blue/posts/week-7/ + 2022-03-20T12:40:18+01:00 + + https://mmp.oscar.blue/posts/week-6/ + 2022-03-13T12:40:17+01:00 + + https://mmp.oscar.blue/posts/week-5/ + 2022-03-06T12:40:14+01:00 https://mmp.oscar.blue/posts/week-4/ - 2022-02-23T12:46:59+00:00 + 2022-02-27T12:46:59+00:00 https://mmp.oscar.blue/posts/week-3/ 2022-02-20T12:46:55+00:00 diff --git a/public/tags/index.html b/public/tags/index.html index be4560e..9649145 100644 --- a/public/tags/index.html +++ b/public/tags/index.html @@ -2,7 +2,7 @@ - + @@ -15,7 +15,7 @@ - + @@ -145,7 +145,7 @@ https://github.com/alex-shpak/hugo-book - + diff --git a/public/tags/page/1/index.html b/public/tags/page/1/index.html index 16a8a4c..c2c93cd 100644 --- a/public/tags/page/1/index.html +++ b/public/tags/page/1/index.html @@ -1 +1,10 @@ -https://mmp.oscar.blue/tags/ \ No newline at end of file + + + + https://mmp.oscar.blue/tags/ + + + + + + diff --git a/public/transfer-cnn-arch-simpl.svg b/public/transfer-cnn-arch-simpl.svg new file mode 100644 index 0000000..a6f1bf8 --- /dev/null +++ b/public/transfer-cnn-arch-simpl.svg @@ -0,0 +1,2761 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/public/transfer-cnn-arch.svg b/public/transfer-cnn-arch.svg new file mode 100644 index 0000000..cc03c58 --- /dev/null +++ b/public/transfer-cnn-arch.svg @@ -0,0 +1,6675 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/20-epoch-plot.png b/static/20-epoch-plot.png new file mode 100644 index 0000000..fdbfa39 --- /dev/null +++ b/static/20-epoch-plot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72405c4fbd139e5018cdacdcd5edf614d2d418ef3313d6e5c1e9fb659be8a5b4 +size 37653 diff --git a/static/2000-epoch-plot.png b/static/2000-epoch-plot.png new file mode 100644 index 0000000..1002b80 --- /dev/null +++ 
b/static/2000-epoch-plot.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91959decf7d6a2a7dec7c4f6b99eea7a4d8ef8dd504b290e516024b48d171e64 +size 33199 diff --git a/static/predictions.png b/static/predictions.png new file mode 100644 index 0000000..1fac605 --- /dev/null +++ b/static/predictions.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe9049f11c40a76a23f223c1e0b3e40f0cf327f2d423685475d1b61e88ec9110 +size 58692 diff --git a/static/project-model.svg b/static/project-model.svg new file mode 100644 index 0000000..55d8d8b --- /dev/null +++ b/static/project-model.svg @@ -0,0 +1,3962 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/transfer-cnn-arch-simpl.svg b/static/transfer-cnn-arch-simpl.svg new file mode 100644 index 0000000..a6f1bf8 --- /dev/null +++ b/static/transfer-cnn-arch-simpl.svg @@ -0,0 +1,2761 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/static/transfer-cnn-arch.svg b/static/transfer-cnn-arch.svg new file mode 100644 index 0000000..cc03c58 --- /dev/null +++ b/static/transfer-cnn-arch.svg @@ -0,0 +1,6675 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +