import numpy
import scipy.special
%matplotlib
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
    """Set up layer sizes, randomly initialised weights, learning rate and activation."""
    self.inodes = inputnodes
    self.hnodes = hiddennodes
    self.onodes = outputnodes
    # Link weights drawn from N(0, 1/sqrt(fan-in of the receiving layer)):
    # wih is (hidden x input), who is (output x hidden).
    self.wih = numpy.random.normal(0.0, self.hnodes ** -0.5,
                                   (self.hnodes, self.inodes))
    self.who = numpy.random.normal(0.0, self.onodes ** -0.5,
                                   (self.onodes, self.hnodes))
    self.lr = learningrate
    # Sigmoid activation (logistic function).
    self.activation_function = scipy.special.expit
def train(self, inputs_list, targets_list):
    """One backpropagation step on a single (input, target) example.

    Fixes over the original: `ouput_errors = (target - actual)` referenced
    undefined names (should be `targets - final_outputs`), `outputs_errors`
    was a typo for that same variable, and two bare `layers` statements
    (garbled comments) raised NameError.
    """
    # Turn the input lists into column vectors.
    inputs = numpy.array(inputs_list, ndmin=2).T
    targets = numpy.array(targets_list, ndmin=2).T
    # Forward pass through hidden and output layers.
    hidden_inputs = numpy.dot(self.wih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    final_inputs = numpy.dot(self.who, hidden_outputs)
    final_outputs = self.activation_function(final_inputs)
    # Output error, then error back-propagated to the hidden layer.
    output_errors = targets - final_outputs
    hidden_errors = numpy.dot(self.who.T, output_errors)
    # Update the weights between the hidden and output layers.
    self.who += self.lr * numpy.dot(
        (output_errors * final_outputs * (1.0 - final_outputs)),
        numpy.transpose(hidden_outputs))
    # Update the weights between the input and hidden layers.
    self.wih += self.lr * numpy.dot(
        (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
        numpy.transpose(inputs))
def query(self, inputs_list):
    """Forward pass: return output-layer activations as a column vector.

    Fixes over the original: the parameter was misspelled `inouts_list`
    while the body read `inputs_list` (NameError), and
    `self.ativation_function` was a typo for `self.activation_function`.
    Callers invoke this positionally (`n.query(inputs)`), so the rename is safe.
    """
    # Input list -> column vector.
    inputs = numpy.array(inputs_list, ndmin=2).T
    hidden_inputs = numpy.dot(self.wih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    final_inputs = numpy.dot(self.who, hidden_outputs)
    final_outputs = self.activation_function(final_inputs)
    return final_outputs
class neuralNetwork:
    """A simple three-layer (input/hidden/output) feed-forward network.

    The original definition was a stub function that returned 0, so the
    later calls `n.train(...)` and `n.query(...)` failed with
    AttributeError.  The call site `neuralNetwork(input_nodes, ...)` is
    unchanged: constructing the class takes the same four arguments.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        # Weights ~ N(0, 1/sqrt(fan-in)) of the receiving layer.
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5),
                                       (self.onodes, self.hnodes))
        self.lr = learning_rate
        # Sigmoid activation.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """One gradient-descent step on a single (input, target) example."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Hidden->output weight update, then input->hidden.
        self.who += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs),
            numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
            numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward pass: return the output-layer activations (column vector)."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        return self.activation_function(numpy.dot(self.who, hidden_outputs))
# Network hyperparameters for 28x28 MNIST images and 10 digit classes.
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 1.0

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes,
                  learning_rate)

# Read every training record.  The original used readline(), which returns
# only the first line, and then iterating that single string yielded
# characters rather than CSV rows.
with open("mnist_dataset/mnist_train.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()

epochs = 5
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # Convert the pixel strings to floats FIRST, then rescale 0..255
        # into 0.01..1.00.  The original divided the raw Python list by
        # 255.0 (a TypeError) and used a stray 0.01151 offset.
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # Target vector: 0.01 everywhere, 0.99 at the correct label.
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
# Read every test record (readline() only returned the first line).
with open("mnist_dataset/mnist_test.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()

# The original never initialised this list before calling append().
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    # Same rescaling as training: convert to floats first, offset 0.01
    # (the original's +1.0 pushed inputs outside the trained range and
    # divided a raw list by 255.0, a TypeError).
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    # The network's answer is the index of the highest output activation.
    label = numpy.argmax(outputs)
    scorecard.append(1 if label == correct_label else 0)

# Fraction of correctly classified test records.  The original printed via
# the undefined name `secordcard_array` and misspelled "performance".
scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)
import keras
from keras.datasets import minst
from keras.models import Sequential
from keras.layer.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.callbacks import EailyStopping, CSVLogger
%matplotlib inline
import matplotlib.pyplot as plt
batch_size = 128
num_classes = 10
epochs = 20

# Load the MNIST images ("minst" was a typo; the dataset module is "mnist").
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preview the first ten training digits in a 2x5 grid.
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.title("M_%d" % i)
    plt.axis("off")
    plt.imshow(x_train[i].reshape(28, 28), cmap=None)
plt.show()

# Flatten 28x28 images to 784-vectors and rescale pixels to [0, 1].
x_train = x_train.reshape(60000, 784).astype('float32')
x_test = x_test.reshape(10000, 784).astype('float32')
x_train /= 255
x_test /= 255

# One-hot encode the digit labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# MLP: two 512-unit ReLU hidden layers with dropout, softmax over 10 classes.
model = Sequential([
    Dense(512, input_shape=(784, )),
    Activation('relu'),
    Dropout(0.2),
    Dense(512),
    Activation('relu'),
    Dropout(0.2),
    Dense(10),
    Activation('softmax'),
])
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
# Stop early once validation loss fails to improve for 2 epochs; log per-epoch
# metrics to training.log.
es = EarlyStopping(monitor='val_loss', patience=2)
csv_logger = CSVLogger('training.log')
hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                 verbose=1, validation_split=0.1, callbacks=[es, csv_logger])
# Evaluate on the held-out test set ("x_text"/"y_text" were undefined typos
# for x_test/y_test).
score = model.evaluate(x_test, y_test, verbose=0)
print('test loss:', score[0])
print('test acc:', score[1])
# Plot training vs. validation loss per epoch from the fit history.
train_loss = hist.history['loss']
valid_loss = hist.history['val_loss']
n_epochs = len(train_loss)
plt.plot(range(n_epochs), train_loss, marker='.', label='loss(training data)')
plt.plot(range(n_epochs), valid_loss, marker='.', label='val_loss(evaluate data)')
plt.legend(loc='best')
plt.grid()
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
如果要針對你的問題回答,我會說:直接放在一起不就好了。
至於能不能跑、可不可以用——「我哪知道」!!
我不是神啊……
這邊確實有很多大神級人物,但卻沒有通靈級的大師;不知用途,也沒頭沒尾,這種忙應該很難幫!
試試看:
import numpy
import scipy.special
import keras
from keras.datasets import minst
from keras.models import Sequential
from keras.layer.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.callbacks import EailyStopping, CSVLogger
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
    """Set up layer sizes, randomly initialised weights, learning rate and activation."""
    self.inodes = inputnodes
    self.hnodes = hiddennodes
    self.onodes = outputnodes
    # Link weights drawn from N(0, 1/sqrt(fan-in of the receiving layer)):
    # wih is (hidden x input), who is (output x hidden).
    self.wih = numpy.random.normal(0.0, self.hnodes ** -0.5,
                                   (self.hnodes, self.inodes))
    self.who = numpy.random.normal(0.0, self.onodes ** -0.5,
                                   (self.onodes, self.hnodes))
    self.lr = learningrate
    # Sigmoid activation (logistic function).
    self.activation_function = scipy.special.expit
def train(self, inputs_list, targets_list):
    """One backpropagation step on a single (input, target) example.

    Fixes over the original: `ouput_errors = (target - actual)` referenced
    undefined names (should be `targets - final_outputs`), `outputs_errors`
    was a typo for that same variable, and two bare `layers` statements
    (garbled comments) raised NameError.
    """
    # Turn the input lists into column vectors.
    inputs = numpy.array(inputs_list, ndmin=2).T
    targets = numpy.array(targets_list, ndmin=2).T
    # Forward pass through hidden and output layers.
    hidden_inputs = numpy.dot(self.wih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    final_inputs = numpy.dot(self.who, hidden_outputs)
    final_outputs = self.activation_function(final_inputs)
    # Output error, then error back-propagated to the hidden layer.
    output_errors = targets - final_outputs
    hidden_errors = numpy.dot(self.who.T, output_errors)
    # Update the weights between the hidden and output layers.
    self.who += self.lr * numpy.dot(
        (output_errors * final_outputs * (1.0 - final_outputs)),
        numpy.transpose(hidden_outputs))
    # Update the weights between the input and hidden layers.
    self.wih += self.lr * numpy.dot(
        (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
        numpy.transpose(inputs))
def query(self, inputs_list):
    """Forward pass: return output-layer activations as a column vector.

    Fixes over the original: the parameter was misspelled `inouts_list`
    while the body read `inputs_list` (NameError), and
    `self.ativation_function` was a typo for `self.activation_function`.
    Callers invoke this positionally (`n.query(inputs)`), so the rename is safe.
    """
    # Input list -> column vector.
    inputs = numpy.array(inputs_list, ndmin=2).T
    hidden_inputs = numpy.dot(self.wih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    final_inputs = numpy.dot(self.who, hidden_outputs)
    final_outputs = self.activation_function(final_inputs)
    return final_outputs
class neuralNetwork:
    """A simple three-layer (input/hidden/output) feed-forward network.

    The original definition was a stub function that returned 0, so the
    later calls `n.train(...)` and `n.query(...)` failed with
    AttributeError.  The call site `neuralNetwork(input_nodes, ...)` is
    unchanged: constructing the class takes the same four arguments.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        self.inodes = input_nodes
        self.hnodes = hidden_nodes
        self.onodes = output_nodes
        # Weights ~ N(0, 1/sqrt(fan-in)) of the receiving layer.
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5),
                                       (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5),
                                       (self.onodes, self.hnodes))
        self.lr = learning_rate
        # Sigmoid activation.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """One gradient-descent step on a single (input, target) example."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Hidden->output weight update, then input->hidden.
        self.who += self.lr * numpy.dot(
            output_errors * final_outputs * (1.0 - final_outputs),
            numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot(
            hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
            numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward pass: return the output-layer activations (column vector)."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        return self.activation_function(numpy.dot(self.who, hidden_outputs))
# Network hyperparameters for 28x28 MNIST images and 10 digit classes.
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 1.0

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes,
                  learning_rate)

# Read every training record.  The original used readline(), which returns
# only the first line, and then iterating that single string yielded
# characters rather than CSV rows.
with open("mnist_dataset/mnist_train.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()

epochs = 5
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # Convert the pixel strings to floats FIRST, then rescale 0..255
        # into 0.01..1.00.  The original divided the raw Python list by
        # 255.0 (a TypeError) and used a stray 0.01151 offset.
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # Target vector: 0.01 everywhere, 0.99 at the correct label.
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
# Read every test record (readline() only returned the first line).
with open("mnist_dataset/mnist_test.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()

# The original never initialised this list before calling append().
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    # Same rescaling as training: convert to floats first, offset 0.01
    # (the original's +1.0 pushed inputs outside the trained range and
    # divided a raw list by 255.0, a TypeError).
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    # The network's answer is the index of the highest output activation.
    label = numpy.argmax(outputs)
    scorecard.append(1 if label == correct_label else 0)

# Fraction of correctly classified test records.  The original printed via
# the undefined name `secordcard_array` and misspelled "performance".
scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)
batch_size = 128
num_classes = 10
epochs = 20

# Load the MNIST images ("minst" was a typo; the dataset module is "mnist").
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Preview the first ten training digits in a 2x5 grid.
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.title("M_%d" % i)
    plt.axis("off")
    plt.imshow(x_train[i].reshape(28, 28), cmap=None)
plt.show()

# Flatten 28x28 images to 784-vectors and rescale pixels to [0, 1].
x_train = x_train.reshape(60000, 784).astype('float32')
x_test = x_test.reshape(10000, 784).astype('float32')
x_train /= 255
x_test /= 255

# One-hot encode the digit labels.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# MLP: two 512-unit ReLU hidden layers with dropout, softmax over 10 classes.
model = Sequential([
    Dense(512, input_shape=(784, )),
    Activation('relu'),
    Dropout(0.2),
    Dense(512),
    Activation('relu'),
    Dropout(0.2),
    Dense(10),
    Activation('softmax'),
])
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
# Stop early once validation loss fails to improve for 2 epochs; log per-epoch
# metrics to training.log.
es = EarlyStopping(monitor='val_loss', patience=2)
csv_logger = CSVLogger('training.log')
hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                 verbose=1, validation_split=0.1, callbacks=[es, csv_logger])
# Evaluate on the held-out test set ("x_text"/"y_text" were undefined typos
# for x_test/y_test).
score = model.evaluate(x_test, y_test, verbose=0)
print('test loss:', score[0])
print('test acc:', score[1])
# Plot training vs. validation loss per epoch from the fit history.
train_loss = hist.history['loss']
valid_loss = hist.history['val_loss']
n_epochs = len(train_loss)
plt.plot(range(n_epochs), train_loss, marker='.', label='loss(training data)')
plt.plot(range(n_epochs), valid_loss, marker='.', label='val_loss(evaluate data)')
plt.legend(loc='best')
plt.grid()
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()