From a90b54d1b7f8ee780920e2ccd30692ab8e783f46 Mon Sep 17 00:00:00 2001
From: Sanghyun Son <thstkdgus35@snu.ac.kr>
Date: Thu, 6 Dec 2018 13:58:37 +0900
Subject: [PATCH] fix some typos

---
 README.md             |  5 ++++-
 src/model/__init__.py | 21 +++++++++++--------
 2 files changed, 17 insertions(+), 9 deletions(-)
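
Note on the ``forward_chop`` merge fixed below: the four upscaled chops are
stitched together with slices in output coordinates, and ``bottom_r`` /
``right_r`` use negative starts so each chop is indexed from its own end.
The old ``right_r = slice(scale * w//2, None)`` used a positive start, which
grabs a wrongly positioned (and generally wrongly sized) region once the
chops are smaller than the full output. A minimal sketch of the merge, with
hypothetical sizes (the ``overlap`` margin stands in for the real code's
``shave``-derived padding):

```python
import torch

scale, h, w = 2, 6, 6               # hypothetical low-res input size
H, W = scale * h, scale * w         # high-res output size: 12 x 12
overlap = 2 * scale                 # each chop extends past the midlines

# Four upscaled chops, one per quadrant, each a bit larger than H/2 x W/2.
ch, cw = H // 2 + overlap, W // 2 + overlap
chops = [torch.full((1, 1, ch, cw), float(i)) for i in range(4)]

top      = slice(0, H // 2)
bottom   = slice(H - H // 2, H)
bottom_r = slice(H // 2 - H, None)  # negative start: last H - H//2 rows of a chop
left     = slice(0, W // 2)
right    = slice(W - W // 2, W)
right_r  = slice(W // 2 - W, None)  # negative start: last W - W//2 cols of a chop

out = chops[0].new_zeros(1, 1, H, W)
out[..., top,    left ] = chops[0][..., top,      left   ]
out[..., top,    right] = chops[1][..., top,      right_r]
out[..., bottom, left ] = chops[2][..., bottom_r, left   ]
out[..., bottom, right] = chops[3][..., bottom_r, right_r]

print(out[0, 0])  # the four quadrants come out labelled 0..3
```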

diff --git a/README.md b/README.md
index 72f97ca..48dae5c 100755
--- a/README.md
+++ b/README.md
@@ -89,6 +89,10 @@ You can evaluate your models with widely-used benchmark datasets:
 
 For these datasets, we first convert the result images to the YCbCr color space and evaluate PSNR on the Y channel only. You can download the [benchmark datasets](https://cv.snu.ac.kr/research/EDSR/benchmark.tar) (250MB). Set ``--dir_data <where_benchmark_folder_located>`` to evaluate EDSR and MDSR on the benchmarks.
 
+You can download some of the result images from [here](https://cv.snu.ac.kr/research/EDSR/result_image/edsr-results.tar).
+The archive contains the **EDSR+_baseline_x4** and **EDSR+_x4** results.
+Alternatively, you can easily generate result images with the ``demo.sh`` script.
+
 ## How to train EDSR and MDSR
 We used the [DIV2K](http://www.vision.ee.ethz.ch/%7Etimofter/publications/Agustsson-CVPRW-2017.pdf) dataset to train our model. Please download it from [here](https://cv.snu.ac.kr/research/EDSR/DIV2K.tar) (7.1GB).
 
@@ -128,7 +132,6 @@ sh demo.sh
 
 * Feb 23, 2018
   * PyTorch 0.3.1 is now the default. Use the legacy/0.3.0 branch if you use the old version.
-   
   * With the new ``src/data/DIV2K.py`` code, one can easily create a new data class for super-resolution.
   * New binary data pack. (Please remove the ``DIV2K_decoded`` folder from your dataset if you have one.)
   * With ``--ext bin``, the code automatically generates and saves a binary data pack corresponding to the previous ``DIV2K_decoded``. (This requires a large amount of RAM (~45GB; swap can be used), so please be careful.)
diff --git a/src/model/__init__.py b/src/model/__init__.py
index dca13ea..f1a1e03 100644
--- a/src/model/__init__.py
+++ b/src/model/__init__.py
@@ -42,7 +42,8 @@ class Model(nn.Module):
 
         if self.training:
             if self.n_GPUs > 1:
-                return P.data_parallel(self.model, x, range(self.n_GPUs)
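+                # replicate the model and scatter the input batch across the GPUs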
+                return P.data_parallel(self.model, x, range(self.n_GPUs))
             else:
                 return self.model(x)
         else:
@@ -139,16 +139,20 @@ class Model(nn.Module):
                 else:
                     for y_chop, _y in zip(y_chops, y): y_chop.append(_y)
 
-        top = slice(0, scale * h//2)
-        bottom = slice(scale * (h - h//2), scale * h)
-        bottom_r = slice(scale* (h//2 - h), None)
-        left = slice(0, scale * w//2)
-        right = slice(scale * (w - w//2), scale * w)
-        right_r = slice(scale * w//2, None)
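+        # Work in output (scaled) coordinates; the *_r slices use negative
+        # starts so each chop is indexed from its own bottom/right end.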
+        h *= scale
+        w *= scale
+        top = slice(0, h//2)
+        bottom = slice(h - h//2, h)
+        bottom_r = slice(h//2 - h, None)
+        left = slice(0, w//2)
+        right = slice(w - w//2, w)
+        right_r = slice(w//2 - w, None)
 
         # batch size, number of color channels
         b, c = y_chops[0][0].size()[:-2]
-        y = [y_chop[0].new(b, c, scale * h, scale * w) for y_chop in y_chops]
+        y = [y_chop[0].new(b, c, h, w) for y_chop in y_chops]
         for y_chop, _y in zip(y_chops, y):
             _y[..., top, left] = y_chop[0][..., top, left]
             _y[..., top, right] = y_chop[1][..., top, right_r]
-- 
GitLab