diff --git a/.DS_Store b/.DS_Store index 46dcf982d03d7e5153dbdef97bdefe0dda9120fd..d62ce2202f18ad4cb67a2693ef88ec149e8ead63 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/week6/.DS_Store b/week6/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 Binary files /dev/null and b/week6/.DS_Store differ diff --git a/week6/note6.pdf b/week6/note 6.pdf similarity index 67% rename from week6/note6.pdf rename to week6/note 6.pdf index 26f43a3a9b71504e9893de2bea08b8fa69239e1e..0f83e36c140f3110b71eefc582c1e9a827efa161 100644 Binary files a/week6/note6.pdf and b/week6/note 6.pdf differ diff --git a/week7,8/.DS_Store b/week7,8/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ebdf9d55d7d824deabef82a8c6c572772cd2f95f Binary files /dev/null and b/week7,8/.DS_Store differ diff --git a/week7,8/CNN.ipynb b/week7,8/CNN.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9ef9de7123cd9d547cfa37d1769edaaa7f7906bf --- /dev/null +++ b/week7,8/CNN.ipynb @@ -0,0 +1,496 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/apple1/Projects/NLP/main/m.model\n", + "/Users/apple1/Projects/NLP/main/.DS_Store\n", + "/Users/apple1/Projects/NLP/main/yelp_stopwords(3m).txt\n", + "/Users/apple1/Projects/NLP/main/CNN.ipynb\n", + "/Users/apple1/Projects/NLP/main/df500k.pkl\n", + "/Users/apple1/Projects/NLP/main/df3m.pkl\n", + "/Users/apple1/Projects/NLP/main/eng_w2v2\n", + "/Users/apple1/Projects/NLP/main/yelp_stopwords(500k).txt\n", + "/Users/apple1/Projects/NLP/main/m.vocab\n", + "/Users/apple1/Projects/NLP/main/.ipynb_checkpoints/CNN-checkpoint.ipynb\n" + ] + } + ], + "source": [ + "import os \n", + "path=os.getcwd()\n", + "for dirname, _, filenames in os.walk(path):\n", + " for filename in filenames:\n", + " print(os.path.join(dirname, filename))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np \n", + "import pandas as pd\n", + "df = pd.read_pickle(\"/Users/apple1/Projects/NLP/main/df500k.pkl\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "050000100000150000200000250000300000350000400000450000" + ] + } + ], + "source": [ + "import sentencepiece as spm\n", + "sp = spm.SentencePieceProcessor()\n", + "sp.load('/Users/apple1/Projects/NLP/main/m.model')\n", + "X2=[]\n", + "for i, line in enumerate(df['text']):\n", + " if i%50000==0:\n", + " print(i,end='')\n", + " X2.append(sp.encode_as_pieces(line))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Maximum review length : 1983\n", + "Average review length : 198.57\n", + "Median review length : 142.00\n" + ] + } + ], + "source": [ + "len_result = [len(s) for s in X2]\n", + "print('Maximum review length : {}'.format(np.max(len_result)))\n", + "print('Average review length : {:.2f}'.format(np.mean(len_result)))\n", + "print('Median review length : {:.2f}'.format(np.median(len_result)))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting gensim\n", + "\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/6e/a5/3ad929fb1f56c32278994c5fd3419df4d53d4579676cc37734a35bacbbb5/gensim-3.8.2-cp37-cp37m-macosx_10_9_x86_64.whl (23.7MB)\n", + "\u001b[K 100% |████████████████████████████████| 23.7MB 719kB/s ta 0:00:011 40% |████████████▉ | 9.5MB 25.1MB/s eta 0:00:01 50% |████████████████▏ | 12.0MB 47.1MB/s eta 0:00:01\n", + "\u001b[?25hCollecting smart-open>=1.8.1 (from gensim)\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/74/77/744c79da6e66691e3500b6dffff29bdd787015eae817d594791edc7b719b/smart_open-2.0.0.tar.gz (103kB)\n", + "\u001b[K 100% |████████████████████████████████| 112kB 10.1MB/s ta 0:00:01\n", + "\u001b[?25hRequirement already satisfied, skipping upgrade: scipy>=1.0.0 in /anaconda3/lib/python3.7/site-packages (from gensim) (1.2.1)\n", + "Requirement already satisfied, skipping upgrade: six>=1.5.0 in /anaconda3/lib/python3.7/site-packages (from gensim) (1.12.0)\n", + "Requirement already satisfied, skipping upgrade: numpy>=1.11.3 in /anaconda3/lib/python3.7/site-packages (from gensim) (1.16.2)\n", + "Requirement already satisfied, skipping upgrade: requests in /anaconda3/lib/python3.7/site-packages (from smart-open>=1.8.1->gensim) (2.21.0)\n", + "Requirement already satisfied, skipping upgrade: boto in /anaconda3/lib/python3.7/site-packages (from smart-open>=1.8.1->gensim) (2.49.0)\n", + "Collecting boto3 (from smart-open>=1.8.1->gensim)\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/c8/1e/587abcd94e8f6dbd42df730f40eb5f7313b6fd7255f5ef5a0db53d116999/boto3-1.13.1-py2.py3-none-any.whl (128kB)\n", + "\u001b[K 100% |████████████████████████████████| 133kB 23.6MB/s ta 0:00:01\n", + "\u001b[?25hRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /anaconda3/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (3.0.4)\n", + "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /anaconda3/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (2019.3.9)\n", + "Requirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /anaconda3/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (2.8)\n", + "Requirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /anaconda3/lib/python3.7/site-packages (from requests->smart-open>=1.8.1->gensim) (1.24.1)\n", + "Collecting botocore<1.17.0,>=1.16.1 (from boto3->smart-open>=1.8.1->gensim)\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/46/b8/588f44ac91f280beabd0d5ce192a65f50e32e39ebb2a4193590ccb3afff2/botocore-1.16.1-py2.py3-none-any.whl (6.2MB)\n", + "\u001b[K 100% |████████████████████████████████| 6.2MB 1.5MB/s ta 0:00:011\n", + "\u001b[?25hCollecting s3transfer<0.4.0,>=0.3.0 (from boto3->smart-open>=1.8.1->gensim)\n", + "\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/79/e6afb3d8b0b4e96cefbdc690f741d7dd24547ff1f94240c997a26fa908d3/s3transfer-0.3.3-py2.py3-none-any.whl (69kB)\n", + "\u001b[K 100% |████████████████████████████████| 71kB 5.2MB/s eta 0:00:01\n", + "\u001b[?25hCollecting jmespath<1.0.0,>=0.7.1 (from boto3->smart-open>=1.8.1->gensim)\n", + " Downloading https://files.pythonhosted.org/packages/a3/43/1e939e1fcd87b827fe192d0c9fc25b48c5b3368902bfb913de7754b0dc03/jmespath-0.9.5-py2.py3-none-any.whl\n", + "Requirement already satisfied, skipping upgrade: python-dateutil<3.0.0,>=2.1 in /anaconda3/lib/python3.7/site-packages (from botocore<1.17.0,>=1.16.1->boto3->smart-open>=1.8.1->gensim) (2.8.0)\n", + "Requirement 
already satisfied, skipping upgrade: docutils<0.16,>=0.10 in /anaconda3/lib/python3.7/site-packages (from botocore<1.17.0,>=1.16.1->boto3->smart-open>=1.8.1->gensim) (0.14)\n", + "Building wheels for collected packages: smart-open\n", + " Building wheel for smart-open (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h Stored in directory: /Users/apple1/Library/Caches/pip/wheels/27/65/38/8d7f5fe8d7afb4e4566587b2d1933cec185fba19257836c943\n", + "Successfully built smart-open\n", + "Installing collected packages: jmespath, botocore, s3transfer, boto3, smart-open, gensim\n", + "Successfully installed boto3-1.13.1 botocore-1.16.1 gensim-3.8.2 jmespath-0.9.5 s3transfer-0.3.3 smart-open-2.0.0\n" + ] + } + ], + "source": [ + "!pip install -U gensim" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from gensim.models import KeyedVectors\n", + "w2v_model = KeyedVectors.load_word2vec_format(\"/Users/apple1/Projects/NLP/main/eng_w2v2\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "50000\n", + "100000\n", + "150000\n", + "200000\n", + "250000\n", + "300000\n", + "350000\n", + "400000\n", + "450000\n" + ] + } + ], + "source": [ + "from keras.preprocessing import sequence\n", + "maxlen=140\n", + "X3=[]\n", + "for index, i in enumerate(X2):\n", + " if index % 50000 == 0: print(index)\n", + " embedding=[w2v_model[i]]\n", + " # pad/truncate so every sentence is exactly 140 tokens long\n", + " embedding2=sequence.pad_sequences(embedding,maxlen=maxlen,dtype='float', padding='post',truncating='post')\n", + " embedding3=embedding2.reshape((maxlen,50))\n", + " # one sentence = one sample \n", + " X3.append(embedding3)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "50" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(X3[0][0])" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.model_selection import train_test_split\n", + "# split into train and test data\n", + "x_train, x_test, y_train, y_test = train_test_split(X3, df['stars'], test_size = 0.1, random_state=1)\n", + "x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "405000 50000 405000 50000\n" + ] + } + ], + "source": [ + "print(len(x_train),len(x_test),len(y_train),len(y_test))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 3.58091784, 0.227751 , 0.40667659, ..., -0.58746982,\n", + " 1.26695299, 3.88480759],\n", + " [ 0.51638752, -3.55406499, 0.19539087, ..., 1.84392953,\n", + " -0.13231976, 0.12722342],\n", + " [ 3.07533288, -1.9783324 , 0.26698241, ..., -0.80393356,\n", + " -0.58639371, -0.85393775],\n", + " ...,\n", + " [-7.35684252, 0.77844363, 9.49768543, ..., 4.138906 ,\n", + " 0.36616307, 1.95301187],\n", + " [-2.52157331, -4.51748991, 1.31354392, ..., -1.24818277,\n", + " 0.39844757, 4.4043889 ],\n", + " [-0.67978507, 0.92991155, -2.68850255, ..., -1.59118426,\n", + " 0.21416074, 1.21653318]])" + 
] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "submodels = []\n", + "for kw in (2, 3, 4): # kernel sizes\n", + " submodel = Sequential()\n", + " submodel.add(Embedding(len(word_index) + 1,\n", + " EMBEDDING_DIM,\n", + " weights=[embedding_matrix],\n", + " input_length=MAX_SEQUENCE_LENGTH,\n", + " trainable=False))\n", + " submodel.add(Conv1D(FILTERS,\n", + " kw,\n", + " padding='valid',\n", + " activation='relu',\n", + " strides=1))\n", + " submodel.add(GlobalMaxPooling1D())\n", + " submodels.append(submodel)\n", + "big_model = Sequential()\n", + "big_model.add(Merge(submodels, mode=\"concat\"))\n", + "big_model.add(Dense(HIDDEN_DIMS))\n", + "big_model.add(Dropout(P_DROPOUT))\n", + "big_model.add(Activation('relu'))\n", + "big_model.add(Dense(1))\n", + "big_model.add(Activation('sigmoid'))\n", + "print('Compiling model')\n", + "big_model.compile(loss='binary_crossentropy',\n", + " optimizer='adam',\n", + " metrics=['accuracy'])" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2\n", + "3\n", + "4\n" + ] + } + ], + "source": [ + "from keras.models import Sequential\n", + "from tensorflow.keras import datasets, layers, models\n", + "from keras.layers import Dense, Conv2D, Flatten, Concatenate, Dropout, Activation, Input\n", + "from keras.layers import GlobalMaxPool2D\n", + "HIDDEN_DIMS=100\n", + "P_DROPOUT=0.2\n", + "kernel_size=[2,3,4]\n", + "sub_models=[]\n", + "#create model\n", + "for i in kernel_size:\n", + " print(i)\n", + " model = Sequential()\n", + " #add model layers\n", + " model.add(Conv2D(filters=2,kernel_size=(i,50), activation='relu', input_shape=(140,50,1)))\n", + " model.add(GlobalMaxPool2D())\n", + " sub_models.append(model)\n", + "big_model = Sequential()\n", + "big_model.add(Concatenate(sub_models))\n", + "big_model.add(Dense(HIDDEN_DIMS))\n", + "big_model.add(Dropout(P_DROPOUT))\n", + "big_model.add(Activation('relu'))\n", + "big_model.add(Dense(2, activation='softmax'))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "#compile model using accuracy to measure model performance\n", + "big_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "ename": "AssertionError", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-13-4e396384ae43>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m#train the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mbig_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_val\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_val\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m/anaconda3/lib/python3.7/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)\u001b[0m\n\u001b[1;32m 1152\u001b[0m \u001b[0msample_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msample_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1153\u001b[0m \u001b[0mclass_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1154\u001b[0;31m batch_size=batch_size)\n\u001b[0m\u001b[1;32m 1155\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1156\u001b[0m \u001b[0;31m# Prepare validation data.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/anaconda3/lib/python3.7/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_standardize_user_data\u001b[0;34m(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)\u001b[0m\n\u001b[1;32m 502\u001b[0m \u001b[0;31m# to match the value shapes.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 503\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 504\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_inputs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 505\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 506\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0my\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/anaconda3/lib/python3.7/site-packages/keras/engine/training.py\u001b[0m in \u001b[0;36m_set_inputs\u001b[0;34m(self, inputs, outputs, training)\u001b[0m\n\u001b[1;32m 412\u001b[0m \u001b[0;31m# since `Sequential` depends on `Model`.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 414\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 415\u001b[0m \u001b[0minputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 416\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuild\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_shape\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAssertionError\u001b[0m: " + ] + } + ], + "source": [ + "#train the model\n", + "big_model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "ename": "ValueError", + "evalue": "This model has not yet been built. Build the model first by calling build() or calling fit() with some data. Or specify input_shape or batch_input_shape in the first layer for automatic build. ", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m<ipython-input-42-5cf24b5525a2>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mbig_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m/anaconda3/lib/python3.7/site-packages/keras/engine/network.py\u001b[0m in \u001b[0;36msummary\u001b[0;34m(self, line_length, positions, print_fn)\u001b[0m\n\u001b[1;32m 1318\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuilt\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1319\u001b[0m raise ValueError(\n\u001b[0;32m-> 1320\u001b[0;31m \u001b[0;34m'This model has not yet been built. '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1321\u001b[0m \u001b[0;34m'Build the model first by calling build() '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1322\u001b[0m \u001b[0;34m'or calling fit() with some data. '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mValueError\u001b[0m: This model has not yet been built. Build the model first by calling build() or calling fit() with some data. Or specify input_shape or batch_input_shape in the first layer for automatic build. 
" + ] + } + ], + "source": [ + "big_model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "score = model.evaluate(x_test, y_test, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#predict first 4 images in the test set\n", + "model.predict(x_test)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[name: \"/device:CPU:0\"\n", + " device_type: \"CPU\"\n", + " memory_limit: 268435456\n", + " locality {\n", + " }\n", + " incarnation: 12359569246933039107]" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from tensorflow.python.client import device_lib\n", + "device_lib.list_local_devices()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf\n", + "config=tf.ConfigProto()\n", + "#메모리 사용량이 60%이상일때, 나머지 40%을 gpu와 병행해서 사용\n", + "config.gpu_options.per_process_gpu_memory_fraction=0.4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/week7,8/note7,8.docx b/week7,8/note7,8.docx new file mode 100644 index 0000000000000000000000000000000000000000..3096647458c717eab24c05dbe3edd65d34feb108 Binary files /dev/null and b/week7,8/note7,8.docx differ diff --git a/week7,8/~$ote7,8.docx b/week7,8/~$ote7,8.docx new file mode 100644 index 0000000000000000000000000000000000000000..fbf579ef8580d4f2e825f38a1155610390d29c8d Binary files /dev/null and b/week7,8/~$ote7,8.docx differ