
Commit 11fd52c

CNN section added
1 parent 0a29faf commit 11fd52c

File tree

1 file changed: +74 -0 lines changed
  • Part 8 - Deep Learning/Section 22 - Convolutional Neural Networks (CNN)

@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 24 22:48:03 2018

@author: Mohammad Doosti Lakhani
"""


"""Important Note: This implementation takes several hours on a CPU. Use a GPU or colab.research.google.com"""

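# Optional check (a minimal sketch, assuming a TensorFlow backend): list the devices the
# backend can see, so you know whether training will actually run on a GPU.
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())  # a 'GPU' entry should appear if one is available
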
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import Dense
from keras.layers import Flatten

# Preprocessing
"""In this step, we just need to put the training and test images into folders with the structure below:
-test_set
    ->class1
    ->class2
    ->...
-training_set
    ->class1
    ->class2
    ->...
"""

# Defining some control parameters
image_x = 128
image_y = 128
image_channels = 3

training_set_path = 'dataset/training_set'
test_set_path = 'dataset/test_set'

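# Optional sanity check (a sketch): confirm the folder structure described above exists
# and list the class sub-folders that Keras will discover.
import os
for path in (training_set_path, test_set_path):
    if os.path.isdir(path):
        print(path, '->', sorted(os.listdir(path)))  # e.g. ['class1', 'class2']
    else:
        print(path, 'is missing')
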
model = Sequential() # Building the sequential model

# First convolutional layer
model.add(Conv2D(64, kernel_size=(5,5), strides=(1,1), activation='relu', input_shape=(image_x,image_y,image_channels)))
model.add(MaxPool2D(strides=(2,2), pool_size=(2,2)))

# Second convolutional layer
model.add(Conv2D(64, kernel_size=(5,5), strides=(1,1), activation='relu'))
model.add(MaxPool2D(strides=(2,2), pool_size=(2,2)))

# Flatten the convolved tensors to feed them into the fully connected layers
model.add(Flatten())

# Fully connected hidden layer
model.add(Dense(units=256, activation='relu'))

# Output layer: a single sigmoid unit for binary classification
model.add(Dense(units=1, activation='sigmoid'))

# Compiling the model
model.compile(optimizer='adam', metrics=['accuracy'], loss='binary_crossentropy')
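
# Inspecting the architecture here can be helpful: model.summary() prints each layer's
# output shape and parameter count for the 128x128x3 input defined above.
model.summary()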

# Data Augmentation
# Because we have a small dataset, we use augmentation to prevent overfitting and train better
# The random transformations are applied to the training_set; the test_set is only rescaled

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory(training_set_path, target_size = (image_x, image_y), batch_size = 32, class_mode = 'binary')
test_set = test_datagen.flow_from_directory(test_set_path, target_size = (image_x, image_y), batch_size = 32, class_mode = 'binary')

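# Optional check: flow_from_directory assigns integer labels from the sub-folder names;
# printing class_indices shows which folder maps to 0 and which to 1.
print(training_set.class_indices)  # e.g. {'class1': 0, 'class2': 1}
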
# Fitting the model with the original and augmented data
# Note: in Keras 2, steps_per_epoch and validation_steps count batches drawn per epoch
model.fit_generator(training_set, steps_per_epoch = 8000, epochs = 25, validation_data = test_set, validation_steps = 2000)
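
# A minimal sketch of using the trained model on a single image; the file path below is
# hypothetical, and the image must be resized and rescaled the same way as the training data.
import numpy as np
from keras.preprocessing import image

test_image = image.load_img('dataset/sample_image.jpg', target_size=(image_x, image_y))
test_image = image.img_to_array(test_image) / 255.0
test_image = np.expand_dims(test_image, axis=0)        # add the batch dimension
prediction = model.predict(test_image)[0][0]           # sigmoid output in [0, 1]
print('predicted label:', 1 if prediction >= 0.5 else 0)  # map back via training_set.class_indices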
