MagmaDNN v1.2 (#37)
* minor fix in distributed memory SGD algo

* minor

* Added CMake option to load MKL-DNN libs

* set MKL option to OFF as default

* added memory manager include

* fixed compilation issues: added missing magma/config.h includes

* Added Resnet example using basic blocks

* Using GPU training in cnn_2d example when CUDA is enabled

* fixed bug in cnn_2d example when MPI is enabled

* fixed bug in lenet5 and resnet example when MPI is enabled

* fixed make build system

* added example implementing resnet model for cifar10 dataset

* added simple argument parser for examples

* progressed on resnet model for cifar dataset; added argument parser for examples

* updated resnet cifar network

* added support for a model summary

* added virtual function to model class

* minor

* Added routines for building resnet model in new folder called models

* added resnet model

* Added bottleneck block

* These ImageNet2012 references seem to be causing the build process to fail.
Temporarily commenting them out for now.

* Create basic Github CI (#36)

This is just a test for now...

* Updates to Github Action

* run make within build directory

* trigger build for all pull requests

* update to build and run tests

* Use same job to run tests
Folder was deleted between jobs for some reason

* cd into build dir before running ctest

* Fixed crossentropy issue

* Fixed MSE error

* update name of job

* Updated Co-Authors in README

* check format with clang-format job

* Restyle all files according to .clang-format style guideline (#38)

Co-authored-by: flipflapflop <[email protected]>
Co-authored-by: Rocco Febbo <[email protected]>
3 people authored Jul 31, 2020
1 parent 76d23ff commit 20820ee
Showing 133 changed files with 4,349 additions and 3,785 deletions.
69 changes: 69 additions & 0 deletions .github/workflows/main.yml
@@ -0,0 +1,69 @@
# This is a basic workflow to help you get started with Actions

name: CI

# Controls when the action will run. Triggers the workflow on every pull
# request and on pushes to the dev branch.
on:
  push:
    branches: [ dev ]
  pull_request:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This job builds the library and runs the unit tests
  build-and-test:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      - name: Setup CMake
        uses: jwlawson/[email protected]
        with:
          cmake-version: '3.16.x'

      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      # Generate build files
      - name: Run CMake
        run: |
          mkdir build
          cd build
          cmake ..

      # Compile the library
      - name: Build MagmaDNN
        run: |
          cd build
          make -j8

      # Build tests
      - name: Build Tests
        run: |
          cd build
          cmake .. -DMAGMADNN_BUILD_TESTS=TRUE
          make -j8

      # Run tests
      - name: Run Tests
        run: |
          cd build
          ctest

  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - uses: DoozyX/[email protected]
        with:
          source: './src ./include'
          exclude: ''
          extensions: 'h,cpp'
          clangFormatVersion: 9
          style: file


37 changes: 33 additions & 4 deletions CMakeLists.txt
@@ -26,6 +26,7 @@ option(MAGMADNN_ENABLE_CUDA "Enable use of CUDA library and compilation of CUDA
option(MAGMADNN_ENABLE_MPI "Enable distributed memory routines using MPI" OFF)
option(MAGMADNN_ENABLE_OMP "Enable parallelization using OpenMP library" OFF)
option(MAGMADNN_ENABLE_MKLDNN "Enable use of MKLDNN library" OFF)
option(MAGMADNN_BUILD_MKLDNN "Enable build of MKLDNN from source" OFF)
option(MAGMADNN_BUILD_DOC "Generate documentation" OFF)
option(MAGMADNN_BUILD_EXAMPLES "Build MagmaDNN examples" ON)
option(MAGMADNN_BUILD_TESTS "Generate build files for unit tests" OFF)
@@ -36,6 +37,9 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules/")
set(MAGMADNN_HAVE_CUDA FALSE)
set(MAGMADNN_HAVE_OMP FALSE)


add_definitions(-DMAGMADNN_CMAKE_BUILD)

########################################
# BLAS
set(LBLAS "" CACHE STRING "BLAS library")
@@ -134,7 +138,8 @@ if (MAGMADNN_ENABLE_CUDA)

########################################
# Find cuBLAS library
find_library(CUBLAS cublas)
find_library(CUBLAS cublas
             HINTS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES})

message(STATUS "cuBLAS: ${CUBLAS}")

@@ -229,11 +234,35 @@ endif()
# MKLDNN

if (MAGMADNN_ENABLE_MKLDNN)

    include(cmake/mkldnn.cmake)

    add_definitions(-D MAGMADNN_HAVE_MKLDNN)

    # add_definitions(-D MAGMADNN_HAVE_MKLDNN)
    set(MAGMADNN_HAVE_MKLDNN TRUE)

    if (MAGMADNN_BUILD_MKLDNN)
        include(cmake/mkldnn.cmake)
    else()

        find_library(ONEDNN_LIBRARIES mkldnn)

        if (ONEDNN_LIBRARIES)
            set(ONEDNN_FOUND TRUE)
        else()
            set(ONEDNN_FOUND FALSE)
        endif()

        if (ONEDNN_FOUND)
            message(STATUS "oneDNN libraries: ${ONEDNN_LIBRARIES}")
        else()
            message(FATAL_ERROR "oneDNN NOT found")
        endif()

        set(LIBS ${LIBS} ${ONEDNN_LIBRARIES})
    endif()

endif()

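Note: with MAGMADNN_ENABLE_MKLDNN set to ON, the build either compiles MKL-DNN from source (MAGMADNN_BUILD_MKLDNN) or links a preinstalled mkldnn library; in both cases the MAGMADNN_HAVE_MKLDNN flag is set so sources can compile oneDNN code paths conditionally. A small hypothetical probe of that flag follows; only the macro name comes from this diff, the program around it is illustrative.

#include <iostream>

// Hypothetical probe: reports whether this build was configured with
// -DMAGMADNN_ENABLE_MKLDNN=ON. Only the MAGMADNN_HAVE_MKLDNN macro is
// taken from the commit; everything else here is illustrative.
int main() {
#if defined(MAGMADNN_HAVE_MKLDNN)
    std::cout << "MagmaDNN built with MKL-DNN/oneDNN support" << std::endl;
#else
    std::cout << "MagmaDNN built without MKL-DNN/oneDNN support" << std::endl;
#endif
    return 0;
}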
2 changes: 1 addition & 1 deletion README.md
@@ -36,4 +36,4 @@ All development takes place on the [github site](https://github.com/MagmaDNN/mag

_author:_ Daniel Nichols

_co-author:_ Sedrick Keh
_co-authors:_ Florent Lopez, Sedrick Keh, Rocco Febbo
66 changes: 66 additions & 0 deletions examples/Arguments.h
@@ -0,0 +1,66 @@
#pragma once

#include <string>
#include <cstring>
#include <iostream>

namespace magmadnn {

class Arguments {
public:
   Arguments():
      enable_shortcut(true),
      learning_rate(0)
   {}

   int parse(std::string const& context, int argc, char** argv) {

      int ret = 0;

      std::string help =
         "Usage: " + context + " [options]\n"
         R"(
Options:
  --disable-shortcut      Disable shortcut in residual layers
  --learning-rate <rate>  Set the SGD learning rate
)";

      for( int i = 1; i < argc; ++i ) {

         if ( !strcmp("--help", argv[i]) ) {
            std::cout << help;
            return 1;
         }

         // Resnet
         else if ( !strcmp("--disable-shortcut", argv[i])) {
            enable_shortcut = false;
            std::cout << "Resnet: disable shortcuts" << std::endl;
         }

         // SGD
         else if ( !strcmp("--learning-rate", argv[i]) && i+1 < argc ) {
            learning_rate = std::stod( argv[++i] );
            std::cout << "SGD: Learning rate set to " << learning_rate << std::endl;
         }
      }

      return ret;
   }

public:

   // Resnet

   // Enable shortcut for residual layers. If set to `false`, this simply
   // implements a plain convolutional network.
   bool enable_shortcut;

   // SGD
   double learning_rate;
};

} // End of namespace magmadnn
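
Note: a minimal usage sketch for this parser; the driver below is hypothetical, only the Arguments class itself comes from the commit.

#include <iostream>

#include "Arguments.h"

// Hypothetical example driver: parse() returns nonzero when --help was
// requested, so the caller can exit before running the model.
int main(int argc, char** argv) {
    magmadnn::Arguments args;

    if (args.parse("resnet_cifar10", argc, argv) != 0) {
        return 0;
    }

    // Parsed options are exposed as plain public members.
    std::cout << "shortcuts enabled: " << (args.enable_shortcut ? "yes" : "no")
              << ", learning rate: " << args.learning_rate << std::endl;

    return 0;
}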
3 changes: 3 additions & 0 deletions examples/CMakeLists.txt
@@ -1,8 +1,11 @@
magmadnn_add_example(alexnet.cpp)
#magmadnn_add_example(alexnet_imagenet2012.cpp)
magmadnn_add_example(cifar10_interactive.cpp)
magmadnn_add_example(cnn_2d.cpp)
magmadnn_add_example(lenet5.cpp)
magmadnn_add_example(mnist_interactive.cpp)
magmadnn_add_example(resnet.cpp)
magmadnn_add_example(resnet_cifar10.cpp)
magmadnn_add_example(simple_network.cpp)
magmadnn_add_example(tensor_math.cpp)
magmadnn_add_example(vgg16.cpp)
8 changes: 8 additions & 0 deletions examples/alexnet.cpp
@@ -18,6 +18,10 @@ int main(int argc, char** argv) {
    // Data type
    using T = float;

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Init(&argc, &argv);
#endif

    magmadnn_init();

    // Location of the CIFAR-10 dataset
@@ -125,6 +129,10 @@ int main(int argc, char** argv) {

    magmadnn_finalize();

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Finalize();
#endif

    return 0;
}
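
Note: the same MPI guard is applied to the other examples below. A condensed sketch of the main() structure each example now follows; the magmadnn.h include path and using-directive are assumptions based on the examples' usual layout.

#include "magmadnn.h"
#if defined(MAGMADNN_HAVE_MPI)
#include <mpi.h>
#endif

using namespace magmadnn;

// Sketch of the init/finalize bracketing the examples adopt in this commit:
// when MPI support is compiled in, MPI is started before and shut down after
// the MagmaDNN runtime.
int main(int argc, char** argv) {
#if defined(MAGMADNN_HAVE_MPI)
    MPI_Init(&argc, &argv);
#endif

    magmadnn_init();

    // ... load data, build the network, train ...

    magmadnn_finalize();

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Finalize();
#endif

    return 0;
}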
55 changes: 34 additions & 21 deletions examples/cnn_2d.cpp
@@ -20,8 +20,13 @@ Tensor<float> *read_mnist_labels(const char *file_name, uint32_t &n_labels, uint
void print_image(uint32_t image_idx, Tensor<float> *images, Tensor<float> *labels, uint32_t n_rows, uint32_t n_cols);

int main(int argc, char **argv) {
    magmadnn_init();

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Init(&argc, &argv);
#endif

    magmadnn_init();

    Tensor<float> *images_host, *labels_host;
    uint32_t n_images, n_rows, n_cols, n_labels, n_classes = 10;
    memory_t training_memory_type;
@@ -45,9 +50,9 @@ int main(int argc, char **argv) {
    params.n_epochs = 20;
    params.learning_rate = 0.05;

#if defined(USE_GPU)
    // training_memory_type = DEVICE;
    training_memory_type = HOST;
#if defined(MAGMADNN_HAVE_CUDA)
    training_memory_type = DEVICE;
    // training_memory_type = HOST;
#else
    training_memory_type = HOST;
#endif
@@ -57,40 +62,44 @@
    auto input = layer::input(x_batch);

    auto conv2d1 = layer::conv2d(input->out(), {5, 5}, 32, {0, 0}, {1, 1}, {1, 1}, true, false);
    // auto conv2d1 = layer::conv2d(input->out(), {2, 2}, 32, {0, 0}, {1, 1}, {0, 0}, true, false);
    auto act1 = layer::activation(conv2d1->out(), layer::RELU);
    // auto pool1 = layer::pooling(act1->out(), {2, 2}, {0, 0}, {2, 2}, MAX_POOL);
    auto pool1 = layer::pooling<float>(act1->out(), {2, 2}, {0, 0}, {2, 2}, AVERAGE_POOL);
    // auto dropout1 = layer::dropout(pool1->out(), 0.25);
    auto pool1 = layer::pooling(act1->out(), {2, 2}, {0, 0}, {2, 2}, MAX_POOL);

    // auto flatten = layer::flatten(input->out());
    // auto flatten = layer::flatten(act1->out());
    // auto flatten = layer::flatten(dropout1->out());
    auto flatten = layer::flatten(pool1->out());
    auto conv2d2 = layer::conv2d(pool1->out(), {5, 5}, 32, {0, 0}, {1, 1}, {1, 1}, true, false);
    auto act2 = layer::activation(conv2d2->out(), layer::RELU);
    auto pool2 = layer::pooling(act2->out(), {2, 2}, {0, 0}, {2, 2}, MAX_POOL);

    auto flatten = layer::flatten(pool2->out());

    auto fc1 = layer::fullyconnected(flatten->out(), 128, true);
    auto act2 = layer::activation(fc1->out(), layer::RELU);
    auto fc2 = layer::fullyconnected(act2->out(), n_classes, false);
    // auto fc2 = layer::fullyconnected(flatten->out(), n_classes, false);
    auto fc1 = layer::fullyconnected(flatten->out(), 768, true);
    auto act3 = layer::activation(fc1->out(), layer::RELU);

    auto act3 = layer::activation(fc2->out(), layer::SOFTMAX);
    auto fc2 = layer::fullyconnected(act3->out(), 500, true);
    auto act4 = layer::activation(fc2->out(), layer::RELU);

    auto output = layer::output(act3->out());
    auto fc3 = layer::fullyconnected(act4->out(), n_classes, false);
    auto act5 = layer::activation(fc3->out(), layer::SOFTMAX);

    auto output = layer::output(act5->out());

    std::vector<layer::Layer<float> *> layers =
        {input,
         conv2d1, act1,
         pool1,
         // dropout1,
         conv2d2, act2,
         pool2,
         flatten,
         fc1, act2,
         fc2, act3,
         fc1, act3,
         fc2, act4,
         fc3, act5,
         output};

    model::NeuralNetwork<float> model(layers, optimizer::CROSS_ENTROPY, optimizer::SGD, params);

    model::metric_t metrics;

    model.summary();

    model.fit(images_host, labels_host, metrics, true);

    delete images_host;
@@ -99,6 +108,10 @@

    magmadnn_finalize();

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Finalize();
#endif

    return 0;
}

8 changes: 8 additions & 0 deletions examples/lenet5.cpp
@@ -17,6 +17,10 @@ int main(int argc, char** argv) {

    // Data type
    using T = float;

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Init(&argc, &argv);
#endif

    magmadnn_init();

@@ -121,5 +125,9 @@

    magmadnn_finalize();

#if defined(MAGMADNN_HAVE_MPI)
    MPI_Finalize();
#endif

    return 0;
}