Commit 430424ea authored by Alison Carrera's avatar Alison Carrera
Browse files

First Commit.

parent d49675ef
# All targets name actions, not files; declare them phony so a stray
# file called "clean"/"build"/"setup"/"deploy" cannot shadow them.
.PHONY: clean build setup deploy

# Remove packaging artefacts and test caches.
clean:
	rm -rf pytest_cache/
	rm -rf dist/
	rm -rf onn.egg-info/

# Build a source distribution into dist/.
build:
	python setup.py sdist

# Install linting, runtime and test dependencies.
setup:
	pip install bandit
	pip install -r requirements.txt
	pip install -r requirements_test.txt

# Rebuild from a clean tree and upload to PyPI.
deploy: clean build
	twine upload dist/*
\ No newline at end of file
import torch
import torch.nn as nn
import torch.nn.functional as F
class ONN():
def __init__(self, features_size, max_num_hidden_layers, qtd_neuron_per_hidden_layer, n_classes, batch_size=1,
b=0.99, n=0.01, s=0.2, use_cuda=False):
super(ONN, self).__init__()
if torch.cuda.is_available() and use_cuda:
print("Using CUDA :]")
self.device = torch.device("cuda:0" if torch.cuda.is_available() and use_cuda else "cpu")
self.features_size = features_size
self.max_num_hidden_layers = max_num_hidden_layers
self.qtd_neuron_per_hidden_layer = qtd_neuron_per_hidden_layer
self.n_classes = n_classes
self.batch_size = batch_size
self.b = torch.tensor(b).to(self.device)
self.n = torch.tensor(n).to(self.device)
self.s = torch.tensor(s).to(self.device)
self.hidden_layers = []
self.output_layers = []
self.hidden_layers.append(nn.Linear(features_size, qtd_neuron_per_hidden_layer))
for i in range(max_num_hidden_layers - 1):
self.hidden_layers.append(nn.Linear(qtd_neuron_per_hidden_layer, qtd_neuron_per_hidden_layer))
for i in range(max_num_hidden_layers):
self.output_layers.append(nn.Linear(qtd_neuron_per_hidden_layer, n_classes))
self.hidden_layers = nn.ModuleList(self.hidden_layers).to(self.device)
self.output_layers = nn.ModuleList(self.output_layers).to(self.device)
self.alpha = torch.Tensor(self.max_num_hidden_layers).fill_(1 / (self.max_num_hidden_layers + 1)).to(
self.device)
self.loss_array = []
def zero_grad(self):
for i in range(self.max_num_hidden_layers):
self.output_layers[i].weight.grad.data.fill_(0)
self.output_layers[i].bias.grad.data.fill_(0)
self.hidden_layers[i].weight.grad.data.fill_(0)
self.hidden_layers[i].bias.grad.data.fill_(0)
def update_weights(self, X, Y, show_loss):
Y = torch.from_numpy(Y).to(self.device)
predictions_per_layer = self.forward(X)
losses_per_layer = []
for out in predictions_per_layer:
criterion = nn.CrossEntropyLoss().to(self.device)
loss = criterion(out.view(self.batch_size, self.n_classes), Y.view(self.batch_size).long())
losses_per_layer.append(loss)
w = []
b = []
for i in range(len(losses_per_layer)):
losses_per_layer[i].backward(retain_graph=True)
self.output_layers[i].weight.data -= self.n * self.alpha[i] * self.output_layers[i].weight.grad.data
self.output_layers[i].bias.data -= self.n * self.alpha[i] * self.output_layers[i].bias.grad.data
w.append(self.alpha[i] * self.hidden_layers[i].weight.grad.data)
b.append(self.alpha[i] * self.hidden_layers[i].bias.grad.data)
self.zero_grad()
for i in range(1, len(losses_per_layer)):
self.hidden_layers[i].weight.data -= self.n * torch.sum(torch.cat(w[i:]))
self.hidden_layers[i].bias.data -= self.n * torch.sum(torch.cat(b[i:]))
for i in range(len(losses_per_layer)):
self.alpha[i] *= torch.pow(self.b, losses_per_layer[i])
self.alpha[i] = torch.max(self.alpha[i], self.s / self.max_num_hidden_layers)
z_t = torch.sum(self.alpha)
self.alpha = self.alpha / z_t
if show_loss:
real_output = torch.sum(torch.mul(
self.alpha.view(self.max_num_hidden_layers, 1).repeat(1, self.batch_size).view(
self.max_num_hidden_layers, self.batch_size, 1), predictions_per_layer), 0)
criterion = nn.CrossEntropyLoss().to(self.device)
loss = criterion(real_output.view(self.batch_size, self.n_classes), Y.view(self.batch_size).long())
self.loss_array.append(loss)
if (len(self.loss_array) % 1000) == 0:
print("WARNING: Set 'show_loss' to 'False' when not debugging. "
"It will deteriorate the fitting performance.")
loss = torch.Tensor(self.loss_array).mean().cpu().numpy()
print("Alpha:" + str(self.alpha.data.cpu().numpy()))
print("Training Loss: " + str(loss))
self.loss_array.clear()
def forward(self, X):
hidden_connections = []
X = torch.from_numpy(X).float().to(self.device)
x = F.relu(self.hidden_layers[0](X))
hidden_connections.append(x)
for i in range(1, self.max_num_hidden_layers):
hidden_connections.append(F.relu(self.hidden_layers[i](hidden_connections[i - 1])))
output_class = []
for i in range(self.max_num_hidden_layers):
output_class.append(self.output_layers[i](hidden_connections[i]))
pred_per_layer = torch.stack(output_class)
return pred_per_layer
def validate_input_X(self, data):
if len(data.shape) != 2:
raise Exception("Wrong dimension for this X data. It should have only two dimensions.")
def validate_input_Y(self, data):
if len(data.shape) != 1:
raise Exception("Wrong dimension for this Y data. It should have only one dimensions.")
def partial_fit(self, X_data, Y_data, show_loss=True):
self.validate_input_X(X_data)
self.validate_input_Y(Y_data)
self.update_weights(X_data, Y_data, show_loss)
def predict_proba(self, X_data):
self.validate_input_X(X_data)
result = torch.softmax(
torch.sum(torch.mul(
self.alpha.view(self.max_num_hidden_layers, 1).repeat(1, len(X_data)).view(
self.max_num_hidden_layers, len(X_data), 1), self.forward(X_data)), 0), 0)
result[torch.isnan(result)] = 0
return result.data.cpu().numpy()
def predict(self, X_data):
self.validate_input_X(X_data)
return torch.argmax(torch.sum(torch.mul(
self.alpha.view(self.max_num_hidden_layers, 1).repeat(1, len(X_data)).view(
self.max_num_hidden_layers, len(X_data), 1), self.forward(X_data)), 0), dim=1).cpu().numpy()
torch
numpy
\ No newline at end of file
pytest==4.1.1
pycodestyle==2.4.0
\ No newline at end of file
from setuptools import setup, find_packages
from os import path

# Use the README as the long description rendered on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(name='onn',
      version='0.0.1',
      description='Online Neural Network',
      url='https://github.com/alison-carrera/onn',
      author='Alison Carrera',
      author_email='alison.carrera2007@gmail.com',
      packages=find_packages(),
      # BUG FIX: the PyTorch wheel is published on PyPI as 'torch', not
      # 'pytorch' ('pytorch' is a placeholder that fails to install).
      # This also matches the project's own requirements.txt.
      install_requires=['numpy', 'torch'],
      long_description=long_description,
      long_description_content_type='text/markdown',
      license='Apache 2.0',
      zip_safe=False)
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment