diff --git a/README.md b/README.md
index 72f97ca27ce3c6def57937ed7002a260d2de00cd..48dae5cfcb8978444b6dd5ca5793e2edfb7e6ddd 100755
--- a/README.md
+++ b/README.md
@@ -89,6 +89,10 @@ You can evaluate your models with widely-used benchmark datasets:
 
 For these datasets, we first convert the result images to YCbCr color space and evaluate PSNR on the Y channel only. You can download [benchmark datasets](https://cv.snu.ac.kr/research/EDSR/benchmark.tar) (250MB). Set ``--dir_data <where_benchmark_folder_located>`` to evaluate EDSR and MDSR with the benchmarks.
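+
+For reference, here is a minimal sketch of that Y-channel PSNR protocol, assuming BT.601 luma weights and a ``scale``-pixel border shave (the ``psnr_y`` helper below is illustrative and not part of this repository):
+
+```python
+import numpy as np
+
+def psnr_y(sr, hr, scale):
+    """PSNR on the Y channel of YCbCr; sr and hr are uint8 RGB arrays of shape (H, W, 3)."""
+    diff = sr.astype(np.float64) - hr.astype(np.float64)
+    # BT.601 luma weights; the constant luma offset cancels in the difference
+    y_diff = (diff @ np.array([65.738, 129.057, 25.064])) / 256
+    # shave `scale` border pixels before measuring, a common convention in SR evaluation
+    y_diff = y_diff[scale:-scale, scale:-scale]
+    return 10 * np.log10(255.0 ** 2 / np.mean(y_diff ** 2))
+```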
 
+You can download some of the result images from [here](https://cv.snu.ac.kr/research/EDSR/result_image/edsr-results.tar).
+The archive contains the **EDSR+_baseline_x4** and **EDSR+_x4** results.
+Otherwise, you can easily generate result images with the ``demo.sh`` script.
+
 ## How to train EDSR and MDSR
 We used the [DIV2K](http://www.vision.ee.ethz.ch/%7Etimofter/publications/Agustsson-CVPRW-2017.pdf) dataset to train our model. Please download it from [here](https://cv.snu.ac.kr/research/EDSR/DIV2K.tar) (7.1GB).
 
@@ -128,7 +132,6 @@ sh demo.sh
 
 * Feb 23, 2018
   * Now PyTorch 0.3.1 is the default. Use the ``legacy/0.3.0`` branch if you use the old version.
-   
   * With the new ``src/data/DIV2K.py`` code, one can easily create a new data class for super-resolution.
   * New binary data pack. (Please remove the ``DIV2K_decoded`` folder from your dataset if you have one.)
   * With ``--ext bin``, this code will automatically generate and save a binary data pack corresponding to the previous ``DIV2K_decoded``. (This requires a huge amount of RAM (~45GB; swap can be used), so please be careful.)
diff --git a/src/model/__init__.py b/src/model/__init__.py
index dca13eae6cf659e6d181d2ff6106d7e8e2ad4d0e..f1a1e035f3625b3dc280a7e44f7d387eb9d8ccaa 100644
--- a/src/model/__init__.py
+++ b/src/model/__init__.py
@@ -42,7 +42,7 @@ class Model(nn.Module):
 
         if self.training:
             if self.n_GPUs > 1:
-                return P.data_parallel(self.model, x, range(self.n_GPUs)
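+                # replicate the model, scatter the batch across the GPUs, and gather the outputs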
+                return P.data_parallel(self.model, x, range(self.n_GPUs))
             else:
                 return self.model(x)
         else:
@@ -139,16 +139,18 @@ class Model(nn.Module):
                 else:
                     for y_chop, _y in zip(y_chops, y): y_chop.append(_y)
 
-        top = slice(0, scale * h//2)
-        bottom = slice(scale * (h - h//2), scale * h)
-        bottom_r = slice(scale* (h//2 - h), None)
-        left = slice(0, scale * w//2)
-        right = slice(scale * (w - w//2), scale * w)
-        right_r = slice(scale * w//2, None)
+        h *= scale
+        w *= scale
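+        # h and w are now output (upscaled) sizes; bottom_r and right_r use
+        # negative indices to select the matching region from each
+        # overlapping chop output when stitching below.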
+        top = slice(0, h//2)
+        bottom = slice(h - h//2, h)
+        bottom_r = slice(h//2 - h, None)
+        left = slice(0, w//2)
+        right = slice(w - w//2, w)
+        right_r = slice(w//2 - w, None)
 
         # batch size, number of color channels
         b, c = y_chops[0][0].size()[:-2]
-        y = [y_chop[0].new(b, c, scale * h, scale * w) for y_chop in y_chops]
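+        # allocate one full-resolution canvas per model output; the quadrants are stitched in below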
+        y = [y_chop[0].new(b, c, h, w) for y_chop in y_chops]
         for y_chop, _y in zip(y_chops, y):
             _y[..., top, left] = y_chop[0][..., top, left]
             _y[..., top, right] = y_chop[1][..., top, right_r]