diff --git a/vgg16.ipynb b/vgg16.ipynb new file mode 100644 index 0000000..1ab2e73 --- /dev/null +++ b/vgg16.ipynb @@ -0,0 +1,659 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Задание \"Анализ изображений\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Запуск из Docker gcr.io/tensorflow/tensorflow\n", + "* докер запускает Jupyter Notebook с рабочей папкой /notebooks\n", + "* в докере не хватает библиотек sklearn и Pillow\n", + "\n", + "Рекомендуется запускать докер командой\n", + "\n", + "docker run -it -p 127.0.0.1:8888:8888 -v $PWD:/notebooks gcr.io/tensorflow/tensorflow\n", + "\n", + "при запуске из папки, где лежит данный ноутбук и все нужные файлы, они сразу окажутся в рабочей папке Jupyter \n", + "Notebook.\n", + "\n", + "Следующие две ячейки содержат команды, доустанавливающие нужные библиотеки." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "!pip install sklearn" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "!pip install pillow" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Импортируем всё, что нам нужно для работы." 
class vgg16:
    """VGG-16 model graph: 13 convolutional + 3 fully-connected layers.

    Based on http://www.cs.toronto.edu/~frossard/post/vgg16/
    Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
    Weights from Caffe, converted with https://github.com/ethereon/caffe-tensorflow

    Nothing is actually computed until a TensorFlow session is run; this class
    only builds the (TF1-style) graph and, optionally, loads pretrained weights.
    NOTE(review): module imports (tensorflow as tf, numpy as np) live in an
    earlier notebook cell.
    """

    def __init__(self, imgs, weights=None, sess=None):
        # imgs: float32 placeholder [None, 224, 224, 3] with RGB images.
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l)  # 1000-way class probabilities
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def _conv(self, name, bottom, shape):
        # One 3x3 / stride-1 / SAME conv + bias + ReLU.  `shape` is the kernel
        # shape [kh, kw, in_channels, out_channels].  Appends (kernel, biases)
        # to self.parameters in layer order, which load_weights() relies on.
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(tf.truncated_normal(shape, dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(bottom, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[shape[3]], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.parameters += [kernel, biases]
            return tf.nn.relu(out, name=scope)

    @staticmethod
    def _pool(name, bottom):
        # 2x2 / stride-2 / SAME max-pooling (halves spatial resolution).
        return tf.nn.max_pool(bottom,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name=name)

    def convlayers(self):
        self.parameters = []

        # zero-mean input: subtract the per-channel ImageNet mean pixel
        with tf.name_scope('preprocess'):
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            images = self.imgs - mean

        # Block 1: 64 channels
        self.conv1_1 = self._conv('conv1_1', images, [3, 3, 3, 64])
        self.conv1_2 = self._conv('conv1_2', self.conv1_1, [3, 3, 64, 64])
        self.pool1 = self._pool('pool1', self.conv1_2)

        # Block 2: 128 channels
        self.conv2_1 = self._conv('conv2_1', self.pool1, [3, 3, 64, 128])
        self.conv2_2 = self._conv('conv2_2', self.conv2_1, [3, 3, 128, 128])
        self.pool2 = self._pool('pool2', self.conv2_2)

        # Block 3: 256 channels
        self.conv3_1 = self._conv('conv3_1', self.pool2, [3, 3, 128, 256])
        self.conv3_2 = self._conv('conv3_2', self.conv3_1, [3, 3, 256, 256])
        self.conv3_3 = self._conv('conv3_3', self.conv3_2, [3, 3, 256, 256])
        self.pool3 = self._pool('pool3', self.conv3_3)

        # Block 4: 512 channels
        self.conv4_1 = self._conv('conv4_1', self.pool3, [3, 3, 256, 512])
        self.conv4_2 = self._conv('conv4_2', self.conv4_1, [3, 3, 512, 512])
        self.conv4_3 = self._conv('conv4_3', self.conv4_2, [3, 3, 512, 512])
        self.pool4 = self._pool('pool4', self.conv4_3)

        # Block 5: 512 channels
        self.conv5_1 = self._conv('conv5_1', self.pool4, [3, 3, 512, 512])
        self.conv5_2 = self._conv('conv5_2', self.conv5_1, [3, 3, 512, 512])
        self.conv5_3 = self._conv('conv5_3', self.conv5_2, [3, 3, 512, 512])
        # BUG FIX: the original named this op 'pool4' (copy-paste error).
        self.pool5 = self._pool('pool5', self.conv5_3)

    def fc_layers(self):
        # fc1: flatten pool5 (7*7*512 = 25088 for 224x224 input) -> 4096, ReLU
        with tf.name_scope('fc1'):
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]

        # fc2: 4096 -> 4096, ReLU (used later as the image feature vector)
        with tf.name_scope('fc2'):
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                               trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]

        # fc3: 4096 -> 1000 class logits (softmax is applied in __init__)
        with tf.name_scope('fc3'):
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                               trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]

    def load_weights(self, weight_file, sess):
        """Assign pretrained weights from an .npz archive.

        Relies on sorted(weights.keys()) matching the creation order of
        self.parameters (conv1_1_W, conv1_1_b, ..., fc8_W, fc8_b).
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            # py2/py3-compatible replacement for the original print statement
            print('%d %s %s' % (i, k, np.shape(weights[k])))
            sess.run(self.parameters[i].assign(weights[k]))


# Save an answer consisting of a single number to a file.
def save_answerNum(fname, number):
    with open(fname, "w") as fout:
        fout.write(str(number))


# Save an answer represented by an array to a file (space-separated).
def save_answerArray(fname, array):
    with open(fname, "w") as fout:
        fout.write(" ".join([str(el) for el in array]))
# Load a dictionary from a whitespace-separated text file; used to hold the
# class labels of the data sample ("<filename> <class_id>" per line).
def load_txt(fname):
    """Return {filename: class_id} (both str) read from *fname*."""
    line_dict = {}
    for line in open(fname):
        # BUG FIX: the loop previously rebound `fname`, shadowing the argument.
        key, class_id = line.strip().split()
        line_dict[key] = class_id

    return line_dict


# Process a single image: print the TOP-5 class labels and the model's
# confidence for each of them.
def process_image(fname):
    # NOTE(review): relies on module-level `sess`, `vgg`, `imread`, `imresize`
    # and `class_names` defined in earlier cells; scipy.misc.imread/imresize
    # require SciPy < 1.2 (matching the course's TF1 docker image).
    img1 = imread(fname, mode='RGB')
    img1 = imresize(img1, (224, 224))

    prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]
    preds = (np.argsort(prob)[::-1])[0:5]
    for p in preds:
        # py2/py3-compatible replacement for the original print statement
        print('{0} {1}'.format(class_names[p], prob[p]))
# Start the TF session and initialise the model.  On this step the model loads
# its weights (~500 MB compressed); the per-layer loading progress is printed
# below this cell.  If you see that output a second time, you are rapidly
# running out of memory -- stop.  Do not run this cell more than once per
# Jupyter kernel start.
# NOTE(review): tf.placeholder / tf.Session are TF1 graph-mode APIs -- this
# notebook assumes TensorFlow 1.x (the gcr.io/tensorflow/tensorflow image).
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)

# Sanity check on a sample image: prints the TOP-5 ImageNet classes
# (expected top answer for beach.jpg: "seashore, coast, ...").
process_image('beach.jpg')
# Task 2 cell: load train/00002.jpg and resize it for the network.
# BUG FIX: the original called imread(fname, ...) with `fname` undefined here,
# and then passed the *path string* to imresize; imresize needs the image array.
img1 = imread('train/00002.jpg', mode='RGB')
img1 = imresize(img1, (224, 224))

# Your code here


# Return a feature description for every jpg file in the given folder.
# `folder` is a glob pattern (e.g. './train/*jpg'); `ydict` maps file basename
# to class label.  Returns X (n_images x 4096 fc2 activations) and Y (labels).
def get_features(folder, ydict):

    paths = glob.glob(folder)
    X = np.zeros((len(paths), 4096))
    Y = np.zeros(len(paths))

    for i, img_name in enumerate(paths):
        print(img_name)  # progress output; py2/py3-compatible
        base = os.path.basename(img_name)
        Y[i] = ydict[base]

        img1 = imread(img_name, mode='RGB')
        img1 = imresize(img1, (224, 224))
        # Your code here: obtain the fc2 layer activations for img1


        X[i, :] = fc2

    return X, Y
# Process one data folder.  Expects a results.txt file with class labels inside
# the folder, plus `train` and `test` subdirectories with jpg files.
# NOTE(review): intentionally incomplete course template -- the student must
# build X_test/Y_test and fit `clf` (the assignment asks for SVC with
# random_state=0) before the prediction lines can run.
def process_folder(folder):
    ydict = load_txt(os.path.join(folder,'results.txt'))

    # Training features/labels from the fc2 extractor.
    X, Y = get_features(os.path.join(folder, 'train/*jpg'), ydict)
    # Your code here.
    # X_test, Y_test =

    # Your code here.


    Y_test_pred = clf.predict(X_test)
    print(sum(Y_test == Y_test_pred)) # number of correctly predicted classes


process_folder('.')