diff --git a/Chap1_CNN_Classification/cnn_keras_cpu.ipynb b/Chap1_CNN_Classification/cnn_keras_cpu.ipynb
new file mode 100644
index 0000000..45734b1
--- /dev/null
+++ b/Chap1_CNN_Classification/cnn_keras_cpu.ipynb
@@ -0,0 +1,211 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import tensorflow.keras as keras\n",
+    "from tensorflow.keras.models import Sequential\n",
+    "from tensorflow.keras.layers import Dense, Dropout, Flatten\n",
+    "from tensorflow.keras.layers import Conv2D, MaxPooling2D"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Generate dummy data: the standard dataset would need a GPU, so here we use 100 random 100*100 RGB images"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Generate dummy data\n",
+    "x_train = np.random.random((100, 100, 100, 3))\n",
+    "y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)\n",
+    "x_test = np.random.random((20, 100, 100, 3))\n",
+    "y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# VGG-style architecture; see the paper at https://arxiv.org/abs/1409.1556"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model = Sequential()\n",
+    "# Input: 100x100 images with 3 channels -> (100, 100, 3) tensors.\n",
+    "# Apply 32 convolution filters of size 3x3 each.\n",
+    "model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))\n",
+    "model.add(Conv2D(32, (3, 3), activation='relu'))\n",
+    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+    "model.add(Dropout(0.25))\n",
+    "\n",
+    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
+    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
+    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+    "model.add(Dropout(0.25))\n",
+    "\n",
+    "model.add(Flatten())\n",
+    "model.add(Dense(256, activation='relu'))\n",
+    "model.add(Dropout(0.5))\n",
+    "model.add(Dense(10, activation='softmax'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from tensorflow.keras.optimizers import SGD"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optimize with stochastic gradient descent with momentum. compile() is required because Keras first builds the computation graph and compiles it before it actually runs; see https://keras.io/zh/optimizers/ for the optimizer documentation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n",
+    "model.compile(loss='categorical_crossentropy', optimizer=sgd)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Train for 10 epochs and monitor only the loss"
+   ]
+  },
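+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Quick sanity check (illustrative): the labels above are uniformly random over 10 classes,\n",
+    "# so the model cannot beat chance, and the cross-entropy loss below should therefore hover\n",
+    "# around -ln(1/10) = ln(10), i.e. roughly 2.303.\n",
+    "np.log(10)"
+   ]
+  },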
2.2792\n", + "Epoch 6/10\n", + "100/100 [==============================] - 2s 19ms/sample - loss: 2.3000\n", + "Epoch 7/10\n", + "100/100 [==============================] - 2s 19ms/sample - loss: 2.2852\n", + "Epoch 8/10\n", + "100/100 [==============================] - 2s 20ms/sample - loss: 2.2815\n", + "Epoch 9/10\n", + "100/100 [==============================] - 2s 20ms/sample - loss: 2.2767\n", + "Epoch 10/10\n", + "100/100 [==============================] - 2s 20ms/sample - loss: 2.2814\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.fit(x_train, y_train, batch_size=32, epochs=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "#测试集验证一下,另一个MNIST数据集需要同学自己在百度或谷歌的深度学习框架上训练" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "20/1 [========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================] - 0s 19ms/sample - loss: 2.3132\n" + ] + } + ], + "source": [ + "score = model.evaluate(x_test, y_test, batch_size=32)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Chap1_CNN_Classification/mnist_cnn_keras_gpu.ipynb b/Chap1_CNN_Classification/mnist_cnn_keras_gpu.ipynb new file mode 100644 index 0000000..45756b9 --- /dev/null +++ b/Chap1_CNN_Classification/mnist_cnn_keras_gpu.ipynb @@ -0,0 +1,559 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0\n" + ] + } + ], + "source": [ + "print(tensorflow.__version__)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# 注释:安装tensorflow2.0,import tensorflow.keras as keras,可以使用tensorflow内集成的keras模块,神经网络编写接口友善" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow.keras as keras" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using TensorFlow backend.\n" + ] + } + ], + "source": [ + "from keras.datasets import mnist" + ] + }, + { + "cell_type": "code", + 
"execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from keras.datasets import mnist" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "#引入Keras自带手写数字数据集,且自动按照七三开比例划分训练数据集与测试数据集,初次运行,下载时间在5分钟左右" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X_train original shape (60000, 28, 28)\n", + "y_train original shape (60000,)\n", + "X_test original shape (10000, 28, 28)\n", + "y_test original shape (10000,)\n" + ] + } + ], + "source": [ + "from keras.datasets import mnist\n", + "(X_train, y_train), (X_test, y_test) = mnist.load_data()\n", + "print(\"X_train original shape\", X_train.shape)\n", + "print(\"y_train original shape\", y_train.shape)\n", + "print(\"X_test original shape\", X_test.shape)\n", + "print(\"y_test original shape\", y_test.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "#引入python-matlab接口,实现画图功能,看看图像和标签的模样" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0.5, 1.0, 'Class 5')" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAEICAYAAACZA4KlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAQbElEQVR4nO3df6xUZX7H8fenKNmICLJWJKzIYo1WjbINYldpXWNRMRpF3c2ya2OjEZtA4kZLamjT1TZYsv5ol6xrYKMubK2rjVrR7FasqNi1JV4Rf2FdXYO74C3oAvLDn8C3f8zB3MU7z1xmzswZ7vN5JTczc75z5nzvyMfzzJxz7qOIwMwGv9+rugEz6wyH3SwTDrtZJhx2s0w47GaZcNjNMuGwZ0TSDZL+peo+rBoO+yAj6VuSeiRtl9Qr6eeSplTUy1pJHxa9bJe0rIo+rMZhH0QkXQv8M3ATMBoYB/wQuLDCti6IiIOLn7Mr7CN7DvsgIWkE8PfArIh4MCJ2RMSnEfFIRMyps86/Sfo/Se9LWiHphD618yStkbRN0npJf1UsP0zSo5K2SNok6RlJ/ne0H/B/pMHjq8AXgIf2YZ2fA8cAhwOrgHv61O4Ero6I4cCJwPJi+XXAOuD3qY0e5gKpc67vkfSupGWSTt6H3qxkDvvg8UXgvYjYOdAVIuKuiNgWER8DNwAnFyMEgE+B4yUdEhGbI2JVn+VjgKOKkcMzUf8Ci28D44GjgCeBxySN3OffzErhsA8evwUOk3TAQJ4saYik+ZJ+JWkrsLYoHVbcXgKcB7wt6WlJXy2W3wy8CSyT9Jak6+ttIyJ+EREfRsQHEfGPwBbgT/b9V7MyOOyDx38DHwEXDfD536L2xd2fASOo7YEBBBARz0XEhdSG+P8O3F8s3xYR10XEBOAC4FpJZw1wm7Hn9a3zHPZBIiLeB/4OuF3SRZIOknSgpGmSvtfPKsOBj6mNCA6i9g0+AJKGSvq2pBER8SmwFdhV1M6X9AeS1Gf5rr1fXNI4SacXr/UFSXOojRp+Ue5vbgPlsA8iEXEbcC3wt8C7wG+A2dT2zHtbArwNrAfWAP+zV/3PgbXFEP8vgcuK5ccA/wlspzaa+GFEPNXP6w8H7gA2F9s4F5gWEb9t8tezFsl/vMIsD96zm2XCYTfLhMNulgmH3SwTAzoBoyyS/G2gWZtFRL/nMrS0Z5d0rqTXJb2ZOpPKzKrX9KE3SUOAXwJTqV0Y8RwwIyLWJNbxnt2szdqxZ58MvBkRb0XEJ8BPqfa6aTNLaCXsY6mdobXHumLZ75A0s/jLKT0tbMvMWtTKF3T9DRU+N0yPiEXAIvAw3qxKrezZ1wFH9nn8JeCd1toxs3ZpJezPAcdI+rKkocA3gaXltGVmZWt6GB8ROyXNBh4DhgB3RcSrpXVmZqXq6FVv/sxu1n5tOanGzPYfDrtZJhx2s0w47GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTDrtZJhx2s0w47GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTDrtZJhx2s0w47GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTDrtZJhx2s0w47GaZcNjNMtH0lM22fxgyZEiyPmLEiLZuf/bs2XVrBx10UHLdY489NlmfNWtWsn7LLbfUrc2YMSO57kcffZSsz58/P1m/8cYbk/UqtBR2SWuBbcAuYGdETCqjKTMrXxl79jMj4r0SXsfM2sif2c0y0WrYA1gm6XlJM/t7gqSZknok9bS4LTNrQavD+NMj4h1JhwOPS/rfiFjR9wkRsQhYBCApWtyemTWppT17RLxT3G4EHgIml9GUmZWv6bBLGiZp+J77wNnAK2U1ZmblamUYPxp4SNKe1/nXiPiPUroaZMaNG5esDx06NFk/7bTTkvUpU6bUrY0cOTK57iWXXJKsV2ndunXJ+oIFC5L16dOn161t27Ytue6LL76YrD/99NPJejdqOuwR8RZwcom9mFkb+dCbWSYcdrNMOOxmmXDYzTLhsJtlQhGdO6ltsJ5BN3HixGR9+fLlyXq7LzPtVrt3707Wr7jiimR9+/btTW+7t7c3Wd+8eXOy/vrrrze97XaLCPW33Ht2s0w47GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTPs5eglGjRiXrK1euTN
YnTJhQZjulatT7li1bkvUzzzyzbu2TTz5Jrpvr+Qet8nF2s8w57GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTnrK5BJs2bUrW58yZk6yff/75yfoLL7yQrDf6k8opq1evTtanTp2arO/YsSNZP+GEE+rWrrnmmuS6Vi7v2c0y4bCbZcJhN8uEw26WCYfdLBMOu1kmHHazTPh69i5wyCGHJOuNphdeuHBh3dqVV16ZXPeyyy5L1u+9995k3bpP09ezS7pL0kZJr/RZNkrS45LeKG4PLbNZMyvfQIbxPwbO3WvZ9cATEXEM8ETx2My6WMOwR8QKYO/zQS8EFhf3FwMXldyXmZWs2XPjR0dEL0BE9Eo6vN4TJc0EZja5HTMrSdsvhImIRcAi8Bd0ZlVq9tDbBkljAIrbjeW1ZGbt0GzYlwKXF/cvBx4upx0za5eGw3hJ9wJfAw6TtA74LjAfuF/SlcCvga+3s8nBbuvWrS2t//777ze97lVXXZWs33fffcl6oznWrXs0DHtEzKhTOqvkXsysjXy6rFkmHHazTDjsZplw2M0y4bCbZcKXuA4Cw4YNq1t75JFHkuueccYZyfq0adOS9WXLliXr1nmestkscw67WSYcdrNMOOxmmXDYzTLhsJtlwmE3y4SPsw9yRx99dLK+atWqZH3Lli3J+pNPPpms9/T01K3dfvvtyXU7+W9zMPFxdrPMOexmmXDYzTLhsJtlwmE3y4TDbpYJh90sEz7Onrnp06cn63fffXeyPnz48Ka3PXfu3GR9yZIlyXpvb2/T2x7MfJzdLHMOu1kmHHazTDjsZplw2M0y4bCbZcJhN8uEj7Nb0oknnpis33bbbcn6WWc1P9nvwoULk/V58+Yl6+vXr2962/uzpo+zS7pL0kZJr/RZdoOk9ZJWFz/nldmsmZVvIMP4HwPn9rP8nyJiYvHzs3LbMrOyNQx7RKwANnWgFzNro1a+oJst6aVimH9ovSdJmimpR1L9P0ZmZm3XbNjvAI4GJgK9wK31nhgRiyJiUkRManJbZlaCpsIeERsiYldE7AZ+BEwuty0zK1tTYZc0ps/D6cAr9Z5rZt2h4XF2SfcCXwMOAzYA3y0eTwQCWAtcHRENLy72cfbBZ+TIkcn6BRdcULfW6Fp5qd/DxZ9Zvnx5sj516tRkfbCqd5z9gAGsOKOfxXe23JGZdZRPlzXLhMNulgmH3SwTDrtZJhx2s0z4ElerzMcff5ysH3BA+mDRzp07k/Vzzjmnbu2pp55Krrs/85+SNsucw26WCYfdLBMOu1kmHHazTDjsZplw2M0y0fCqN8vbSSedlKxfeumlyfopp5xSt9boOHoja9asSdZXrFjR0usPNt6zm2XCYTfLhMNulgmH3SwTDrtZJhx2s0w47GaZ8HH2Qe7YY49N1mfPnp2sX3zxxcn6EUccsc89DdSuXbuS9d7e9F8v3717d5nt7Pe8ZzfLhMNulgmH3SwTDrtZJhx2s0w47GaZcNjNMtHwOLukI4ElwBHAbmBRRHxf0ijgPmA8tWmbvxERm9vXar4aHcueMaO/iXZrGh1HHz9+fDMtlaKnpydZnzdvXrK+dOnSMtsZ9AayZ98JXBcRfwj8MTBL0vHA9cATEXEM8ETx2My6VMOwR0RvRKwq7m8DXgPGAhcCi4unLQYualeTZta6ffrMLmk88BVgJTA6Inqh9j8E4PCymzOz8gz43HhJBwMPAN+JiK1Sv9NJ9bfeTGBmc+2ZWVkGtGeXdCC1oN8TEQ8WizdIGlPUxwAb+1s3IhZFxKSImFRGw2bWnIZhV20XfifwWkTc1qe0FLi8uH858HD57ZlZWRpO2SxpCvAM8DK1Q28Ac6l9br8fGAf8Gvh6RGxq8FpZTtk8evToZP34449P1n/wgx8k68cdd9w+91SWlStXJus333xz3drDD6f3D75EtTn1pmxu+Jk9Iv4LqPcB/axWmjKzzvEZdGaZcNjNMuGwm2XCYTfLhMNulgmH3SwT/lPSAzRq1Ki6tYULFybXnThxYrI+YcKEpnoqw7PPPpus33rrrcn6Y489lqx/+OGH+9yTtYf37GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTDrtZJrI5zn7qqacm63PmzEnWJ0+eXLc2duzYpnoqywcffFC3tmDBguS6N910U7K+Y8eOpnqy7uM9u1kmHHazTDjsZplw2M0y4bCbZcJhN8uEw26WiWyOs0+fPr2leivWrFmTrD/66KPJ+s6dO5P11DXnW7ZsSa5r+fCe3SwTDrtZJhx2s0w47GaZcNjNMuGwm2XCYTfLxEDmZz8SWAIcQW1+9kUR8X1JNwBXAe8WT50bET9r8FpZzs9u1kn15mcfSNjHAGMiYpWk4cDzwEXAN4DtEXHLQJtw2M3ar17YG55BFxG9QG9xf5uk14Bq/zSLme2zffrMLmk88BVgZbFotqSXJN0l6dA668yU1COpp6VOzawlDYfxnz1ROhh4GpgXEQ9KGg28BwTwD9SG+lc0eA0P483arOnP7ACSDgQeBR6LiNv6qY8HHo2IExu8jsNu1mb1wt5wGC9JwJ3Aa32DXnxxt8d04JVWmzSz9hnIt/FTgGeAl6kdegOYC8wAJlIbxq8Fri6+zEu9lvfsZm3W0jC+LA67Wfs1PYw3s8HBYTfLhMNulgmH3SwTDrtZJhx2s0w47GaZcNjNMuGwm2XCYTfLhMNulgmH3SwTDrtZJhx2s0x0esrm94C3+zw+rFjWjbq1t27tC9xbs8rs7ah6hY5ez/65jUs9ETGpsgYSurW3bu0L3FuzOtWbh/FmmXDYzTJRddgXVbz9lG7trVv7AvfWrI70VulndjPrnKr37GbWIQ67WSYqCbukcyW9LulNSddX0UM9ktZKelnS6qrnpyvm0Nso6ZU+y0ZJelzSG8Vtv3PsVdTbDZLWF+/daknnVdTbkZKelPSapFclXVMsr/S9S/TVkfet45/ZJQ0BfglMBdYBzwEzImJNRxupQ9JaYFJEVH4ChqQ/BbYDS/ZMrSXpe8CmiJhf/I/y0Ij46y7p7Qb2cRrvNvVWb5rxv6DC967M6c+bUcWefTLwZkS8FRGfAD8FLqygj64XESuATXstvhBYXNxfTO0fS8fV6a0rRERvRKwq7m8D9kwzXul7l+irI6oI+1jgN30er6O75nsPYJmk5yXNrLqZfozeM81WcXt4xf3sreE03p201zTjXfPeNTP9eauqCHt/U9N00/G/0yPij4BpwKxiuGoDcwdwNLU5AHuBW6tspphm/AHgOxGxtcpe+uqnr468b1WEfR1wZJ/HXwLeqaCPfkXEO8XtRuAhah87usmGPTPoFrcbK+7nMxGxISJ2RcRu4EdU+N4V04w/ANwTEQ8Wiyt/7/rrq1PvWxVhfw44RtKXJQ0FvgksraCPz5E0rPjiBEnDgLPpvqmolwKXF/cvBx6usJff0S3TeNebZpyK37vKpz+PiI7/AOdR+0b+V8DfVNFDnb4mAC8WP69W3RtwL7Vh3afURkRXAl8EngDeKG5HdVFvP6E2tfdL1II1pqLeplD7aPgSs
Lr4Oa/q9y7RV0feN58ua5YJn0FnlgmH3SwTDrtZJhx2s0w47GaZcNjNMuGwm2Xi/wHO+E7f7vaaSwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
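+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A quick illustrative check: the raw MNIST images are stored as 8-bit integers, so the\n",
+    "# pixel values run from 0 to 255 -- this is what the normalization step below divides by.\n",
+    "X_train.dtype, X_train.min(), X_train.max()"
+   ]
+  },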
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "plt.imshow(X_train[0], cmap='gray')\n", + "plt.title('Class '+ str(y_train[0]))" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "#图像reshape到28*28*1灰度图(宽度、高度、像素通道)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)\n", + "X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "#图像像素是0-255(2^8),在实际处理中,为了便于梯度相关的优化算法,需要对图像像素做归一化处理" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(60000, 28, 28, 1)" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "X_train = X_train.astype('float32')\n", + "X_test = X_test.astype('float32')\n", + "\n", + "X_train/=255\n", + "X_test/=255\n", + "\n", + "X_train.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "#0-9的数字一共十类,对标签进行独热编码" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "#引入numpy库,python矩阵运算相关的library" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "from keras.utils import np_utils" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(5, array([0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], dtype=float32))" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "number_of_classes = 10\n", + "\n", + "Y_train = np_utils.to_categorical(y_train, number_of_classes)\n", + "Y_test = np_utils.to_categorical(y_test, number_of_classes)\n", + "\n", + "y_train[0], Y_train[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "from keras.models import Sequential\n", + "from keras.layers import Dense, Dropout, Activation, Flatten\n", + "\n", + "from keras.layers.normalization import BatchNormalization\n", + "from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D\n", + "from keras.layers.advanced_activations import LeakyReLU" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "# CNN三步走\n", + "# 1. 卷积层\n", + "# 2. 非线性激活函数\n", + "# 3. 
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A CNN in three steps:\n",
+    "# 1. convolution layer\n",
+    "# 2. non-linear activation function\n",
+    "# 3. pooling layer\n",
+    "# These blocks are stacked repeatedly.\n",
+    "\n",
+    "# 4. Fully connected layers at the end.\n",
+    "# Dropout can be added for regularization.\n",
+    "# BatchNormalization can be added to counter distribution shift.\n",
+    "# Here we first apply 32 convolution filters with a 3*3 receptive field, followed by a ReLU activation and BatchNormalization as the regularizing step, then convolution + activation + max pooling, and repeat the same pattern with 64-filter convolutions\n",
+    "\n",
+    "model = Sequential()\n",
+    "\n",
+    "model.add(Conv2D(32, (3, 3), input_shape=(28,28,1)))\n",
+    "model.add(Activation('relu'))\n",
+    "model.add(BatchNormalization())\n",
+    "model.add(Conv2D(32, (3, 3)))\n",
+    "model.add(Activation('relu'))\n",
+    "model.add(MaxPooling2D(pool_size=(2,2)))\n",
+    "\n",
+    "model.add(BatchNormalization())\n",
+    "model.add(Conv2D(64,(3, 3)))\n",
+    "model.add(Activation('relu'))\n",
+    "model.add(BatchNormalization())\n",
+    "model.add(Conv2D(64, (3, 3)))\n",
+    "model.add(Activation('relu'))\n",
+    "model.add(MaxPooling2D(pool_size=(2,2)))\n",
+    "\n",
+    "model.add(Flatten())\n",
+    "# Fully connected layer\n",
+    "\n",
+    "model.add(BatchNormalization())\n",
+    "model.add(Dense(512))\n",
+    "model.add(Activation('relu'))\n",
+    "model.add(BatchNormalization())\n",
+    "model.add(Dropout(0.05))\n",
+    "model.add(Dense(10))\n",
+    "\n",
+    "# model.add(Convolution2D(10,3,3, border_mode='same'))\n",
+    "# model.add(GlobalAveragePooling2D())\n",
+    "model.add(Activation('softmax'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Inspect the architecture with model.summary()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Model: \"sequential_1\"\n",
+      "_________________________________________________________________\n",
+      "Layer (type)                 Output Shape              Param #   \n",
+      "=================================================================\n",
+      "conv2d_1 (Conv2D)            (None, 26, 26, 32)        320       \n",
+      "_________________________________________________________________\n",
+      "activation_1 (Activation)    (None, 26, 26, 32)        0         \n",
+      "_________________________________________________________________\n",
+      "batch_normalization_1 (Batch (None, 26, 26, 32)        128       \n",
+      "_________________________________________________________________\n",
+      "conv2d_2 (Conv2D)            (None, 24, 24, 32)        9248      \n",
+      "_________________________________________________________________\n",
+      "activation_2 (Activation)    (None, 24, 24, 32)        0         \n",
+      "_________________________________________________________________\n",
+      "max_pooling2d_1 (MaxPooling2 (None, 12, 12, 32)        0         \n",
+      "_________________________________________________________________\n",
+      "batch_normalization_2 (Batch (None, 12, 12, 32)        128       \n",
+      "_________________________________________________________________\n",
+      "conv2d_3 (Conv2D)            (None, 10, 10, 64)        18496     \n",
+      "_________________________________________________________________\n",
+      "activation_3 (Activation)    (None, 10, 10, 64)        0         \n",
+      "_________________________________________________________________\n",
+      "batch_normalization_3 (Batch (None, 10, 10, 64)        256       \n",
+      "_________________________________________________________________\n",
+      "conv2d_4 (Conv2D)            (None, 8, 8, 64)          36928     \n",
+      "_________________________________________________________________\n",
+      "activation_4 (Activation)    (None, 8, 8, 64)          0         \n",
+      "_________________________________________________________________\n",
+      "max_pooling2d_2 (MaxPooling2 (None, 4, 4, 64)          0         \n",
+      "_________________________________________________________________\n",
+      "flatten_1 (Flatten)          (None, 1024)              0         \n",
+      "_________________________________________________________________\n",
+      "batch_normalization_4 (Batch (None, 1024)              4096      \n",
+      "_________________________________________________________________\n",
+      "dense_1 (Dense)              (None, 512)               524800    \n",
+      "_________________________________________________________________\n",
+      "activation_5 (Activation)    (None, 512)               0         \n",
+      "_________________________________________________________________\n",
+      "batch_normalization_5 (Batch (None, 512)               2048      \n",
+      "_________________________________________________________________\n",
+      "dropout_1 (Dropout)          (None, 512)               0         \n",
+      "_________________________________________________________________\n",
+      "dense_2 (Dense)              (None, 10)                5130      \n",
+      "_________________________________________________________________\n",
+      "activation_6 (Activation)    (None, 10)                0         \n",
+      "=================================================================\n",
+      "Total params: 601,578\n",
+      "Trainable params: 598,250\n",
+      "Non-trainable params: 3,328\n",
+      "_________________________________________________________________\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.summary()"
+   ]
+  },
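+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A quick illustrative check of where the parameter counts above come from:\n",
+    "# a Conv2D layer with k filters of size 3*3 on c input channels has 3*3*c*k weights plus k biases,\n",
+    "# a Dense layer has inputs*units weights plus units biases, and BatchNormalization stores\n",
+    "# 4 values per feature (gamma, beta, moving mean, moving variance).\n",
+    "print(3*3*1*32 + 32)      # conv2d_1 -> 320\n",
+    "print(3*3*32*32 + 32)     # conv2d_2 -> 9248\n",
+    "print(3*3*64*64 + 64)     # conv2d_4 -> 36928\n",
+    "print(1024*512 + 512)     # dense_1 -> 524800\n",
+    "print(4*1024)             # batch_normalization_4 -> 4096"
+   ]
+  },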
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from keras.optimizers import Adam"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Use Adam as the optimizer; for more background see the Adam paper at https://arxiv.org/abs/1412.6980"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ImageDataGenerator is the Keras image-preprocessing interface; see https://keras.io/zh/preprocessing/image/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from keras.preprocessing.image import ImageDataGenerator"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Augment the training data to counter the risk of overfitting; the test data is not augmented"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,\n",
+    "                         height_shift_range=0.08, zoom_range=0.08)\n",
+    "\n",
+    "test_gen = ImageDataGenerator()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 39,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_generator=gen.flow(X_train, Y_train, batch_size=64)\n",
+    "test_generator = test_gen.flow(X_test, Y_test, batch_size=64)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Train for 5 epochs and see how the model does"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Epoch 1/5\n"
+     ]
+    }
+   ],
+   "source": [
+    "model.fit_generator(train_generator, steps_per_epoch=60000//64, epochs=5, \n",
+    "                    validation_data=test_generator, validation_steps=10000//64)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Check the result on the test set"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "score = model.evaluate(X_test, Y_test)\n",
+    "print()\n",
+    "print('Test accuracy: ', score[1])"
+   ]
+  },
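+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A small usage sketch (assuming the model has been trained above): predict a single test\n",
+    "# digit and compare the predicted class with the true label.\n",
+    "predicted = np.argmax(model.predict(X_test[:1]), axis=1)[0]\n",
+    "print('predicted:', predicted, ' true label:', y_test[0])"
+   ]
+  },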
"kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Chap2_NN_Regression/boston_housing_mlp.ipynb b/Chap2_NN_Regression/boston_housing_mlp.ipynb new file mode 100644 index 0000000..88b6131 --- /dev/null +++ b/Chap2_NN_Regression/boston_housing_mlp.ipynb @@ -0,0 +1,565 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2.0.0\n" + ] + } + ], + "source": [ + "print(tensorflow.__version__)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "#我们将要预测 20 世纪 70 年代中期波士顿郊区房屋价格的中位数,已知当时郊区的一些数 据点,比如犯罪率、当地房产税率等。\n", + "#这里数据集所包含的数据点相对较少,只有 506 个,分为 404 个训练样本和 102 个测试样本。\n", + "#输入数据的 每个特征(比如犯罪率)都有不同的取值范围。\n", + "#例如,有些特性是比例,取值范围为 0~1;有 的取值范围为 1~12;还有的取值范围为 0~100,等等。" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from tensorflow.keras.datasets import boston_housing" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "#看一下数据集大小" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(404, 13)" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_data.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(102, 13)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + " test_data.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([ 1.23247, 0. , 8.14 , 0. , 0.538 , 6.142 ,\n", + " 91.7 , 3.9769 , 4. , 307. , 21. 
+       " 91.7 , 3.9769 , 4. , 307. , 21. , 396.9 ,\n",
+       " 18.72 ])"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "train_data[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Dataset download and introduction: https://www.kaggle.com/altavish/boston-housing-dataset"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Description of the data features\n",
+    "#CRIM - per capita crime rate by town\n",
+    "#ZN - proportion of residential land zoned for lots over 25,000 sq.ft.\n",
+    "#INDUS - proportion of non-retail business acres per town.\n",
+    "#CHAS - Charles River dummy variable (1 if tract bounds river; 0 otherwise)\n",
+    "#NOX - nitric oxides concentration (parts per 10 million)\n",
+    "#RM - average number of rooms per dwelling\n",
+    "#AGE - proportion of owner-occupied units built prior to 1940\n",
+    "#DIS - weighted distances to five Boston employment centres\n",
+    "#RAD - index of accessibility to radial highways\n",
+    "#TAX - full-value property-tax rate per $10,000\n",
+    "#PTRATIO - pupil-teacher ratio by town\n",
+    "#B - 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n",
+    "#LSTAT - % lower status of the population\n",
+    "#MEDV - Median value of owner-occupied homes in $1000's"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The target is the median home price, in thousands of dollars"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Standardize the data with numpy, because the features are on different scales (for example, area versus tax rate)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "mean = train_data.mean(axis=0)\n",
+    "train_data -= mean\n",
+    "std = train_data.std(axis=0)\n",
+    "train_data /= std\n",
+    "test_data -= mean\n",
+    "test_data /= std"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Because there are so few samples, we use a very small network with two hidden layers of 64 units each. In general, the less training data there is, the worse overfitting becomes, and a small network is one way to mitigate it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The last layer of the network has a single unit and no activation: it is a linear layer.\n",
+    "# This is the typical setup for scalar regression (regression that predicts a single continuous value). Adding an activation function would constrain the range of the output.\n",
+    "# For example, a sigmoid activation on the last layer would only let the network predict values between 0 and 1.\n",
+    "# Because the last layer is purely linear, the network can learn to predict values in any range."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from tensorflow.keras import models\n",
+    "from tensorflow.keras import layers"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Wrap model construction in a function"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def build_model():\n",
+    "    model = models.Sequential()\n",
+    "    model.add(layers.Dense(64, activation='relu',\n",
+    "                           input_shape=(train_data.shape[1],)))\n",
+    "    model.add(layers.Dense(64, activation='relu'))\n",
+    "    model.add(layers.Dense(1))\n",
+    "    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
+    "    return model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# MSE (mean squared error) is the loss; the mean absolute error (MAE) is also monitored"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# K-fold validation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "k=4\n",
"num_val_samples = len(train_data) // k \n", + "num_epochs = 100\n", + "all_scores = []" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "为了在调节网络参数(比如训练的轮数)的同时对网络进行评估,你可以将数据划分为训 练集和验证集,正如前面例子中所做的那样。但由于数据点很少,验证集会非常小(比如大约 100 个样本)。因此,验证分数可能会有很大波动,这取决于你所选择的验证集和训练集。也就 是说,验证集的划分方式可能会造成验证分数上有很大的方差,这样就无法对模型进行可靠的 评估。\n", + "在这种情况下,最佳做法是使用 K 折交叉验证。这种方法将可用数据划分为 K 个分区(K 通常取 4 或 5),实例化 K 个相同的模型,将每个模型在 K-1 个分区上训练,并在剩 下的一个分区上进行评估。模型的验证分数等于 K 个验证分数的平均值。这种方法的代码实现 很简单。" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "processing fold # 0\n", + "processing fold # 1\n", + "processing fold # 2\n", + "processing fold # 3\n" + ] + } + ], + "source": [ + "for i in range(k):\n", + " print('processing fold #', i)\n", + " # Prepare the validation data: data from partition # k\n", + " val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]\n", + " val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]\n", + "\n", + " # Prepare the training data: data from all other partitions\n", + " partial_train_data = np.concatenate(\n", + " [train_data[:i * num_val_samples],\n", + " train_data[(i + 1) * num_val_samples:]],\n", + " axis=0)\n", + " partial_train_targets = np.concatenate(\n", + " [train_targets[:i * num_val_samples],\n", + " train_targets[(i + 1) * num_val_samples:]],\n", + " axis=0)\n", + "\n", + " # Build the Keras model (already compiled)\n", + " model = build_model()\n", + " # Train the model (in silent mode, verbose=0)\n", + " model.fit(partial_train_data, partial_train_targets,\n", + " epochs=num_epochs, batch_size=1, verbose=0)\n", + " # Evaluate the model on the validation data\n", + " val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)\n", + " all_scores.append(val_mae)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "#得到结果" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[2.4735055, 2.3732738, 2.4464724, 2.3988996]" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "all_scores" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "#取均值" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "2.423038" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.mean(all_scores)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "我们让训练时间更长一点,达到 200 个轮次。为了记录模型在每轮的表现,我们需要修改 训练循环,以保存每轮的验证分数记录。" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "processing fold # 0\n", + "processing fold # 1\n", + "processing fold # 2\n", + "processing fold # 3\n" + ] + } + ], + "source": [ + "num_epochs = 100\n", + "all_mae_histories = []\n", + "for i in range(k):\n", + " print('processing fold #', i)\n", + " # Prepare the validation data: data from partition # k\n", + " val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]\n", + " val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]\n", + "\n", + " # Prepare the training data: data from all other partitions\n", + " 
+    "    partial_train_data = np.concatenate(\n",
+    "        [train_data[:i * num_val_samples],\n",
+    "         train_data[(i + 1) * num_val_samples:]],\n",
+    "        axis=0)\n",
+    "    partial_train_targets = np.concatenate(\n",
+    "        [train_targets[:i * num_val_samples],\n",
+    "         train_targets[(i + 1) * num_val_samples:]],\n",
+    "        axis=0)\n",
+    "\n",
+    "    # Build the Keras model (already compiled)\n",
+    "    model = build_model()\n",
+    "    # Train the model (in silent mode, verbose=0)\n",
+    "    history = model.fit(partial_train_data, partial_train_targets,\n",
+    "                        validation_data=(val_data, val_targets),\n",
+    "                        epochs=num_epochs, batch_size=1, verbose=0)\n",
+    "    mae_history = history.history['val_mae']\n",
+    "    all_mae_histories.append(mae_history)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "average_mae_history = [\n",
+    "    np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Plot the validation MAE against the epochs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)\n",
+    "plt.xlabel('Epochs')\n",
+    "plt.ylabel('Validation MAE')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def smooth_curve(points, factor=0.9):\n",
+    "    smoothed_points = []\n",
+    "    for point in points:\n",
+    "        if smoothed_points:\n",
+    "            previous = smoothed_points[-1]\n",
+    "            smoothed_points.append(previous * factor + point * (1 - factor))\n",
+    "        else:\n",
+    "            smoothed_points.append(point)\n",
+    "    return smoothed_points\n",
+    "\n",
+    "smooth_mae_history = smooth_curve(average_mae_history[10:])\n",
+    "\n",
+    "plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)\n",
+    "plt.xlabel('Epochs')\n",
+    "plt.ylabel('Validation MAE')\n",
+    "plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Training for about 80 epochs is enough; beyond that the model starts to overfit"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get a fresh, compiled model.\n",
+    "model = build_model()\n",
+    "# Train it on the entirety of the data.\n",
+    "model.fit(train_data, train_targets,\n",
+    "          epochs=80, batch_size=16, verbose=0)\n",
+    "test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "test_mae_score"
+   ]
+  },
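+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A small usage sketch (assuming the model trained above): predictions are in thousands of\n",
+    "# dollars, so an MAE around 2.5 means the estimates are off by roughly $2,500 on average.\n",
+    "# Compare a few predictions with the true targets:\n",
+    "model.predict(test_data[:3]), test_targets[:3]"
+   ]
+  },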
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This follows the experiment in section 3.6 of François Chollet's \"Deep Learning with Python\"; please make sure to read that book"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/README.md b/README.md
index 4218a08..fe45a14 100644
--- a/README.md
+++ b/README.md
@@ -11,3 +11,11 @@ Bitte versuchen Sie es!
 First notebook about MNIST-CNN experiment in the architecture of VGG.
 Erste Beispiel ueber MNIST-CNN Experiment mit der Architektur von VGG.
+
+Second chapter about Boston Housing price prediction (regression).
+
+Please check:
+
+https://www.kaggle.com/schirmerchad/bostonhoustingmlnd
+
+