mirror of https://github.com/newnius/YAO-optimizer.git synced 2025-12-15 09:06:43 +00:00

add files

2020-04-29 18:35:22 +08:00
parent 348d580f11
commit 3e58e8ac13
23 changed files with 7106 additions and 1 deletion

0 model/__init__.py Normal file

35 model/model_keras.py Normal file

@@ -0,0 +1,35 @@
from keras.layers import Input, Dense, LSTM
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping


def get_keras_model(config):
    # Stacked LSTM: every layer returns the full sequence, so the final
    # Dense layer yields one prediction per time step.
    input1 = Input(shape=(config.time_step, config.input_size))
    lstm = input1
    for _ in range(config.lstm_layers):
        lstm = LSTM(units=config.hidden_size, dropout=config.dropout_rate,
                    return_sequences=True)(lstm)
    output = Dense(config.output_size)(lstm)
    model = Model(input1, output)
    model.compile(loss='mse', optimizer='adam')
    return model


def train(config, train_X, train_Y, valid_X, valid_Y):
    model = get_keras_model(config)
    model.summary()
    if config.add_train:  # resume from previously saved weights
        model.load_weights(config.model_save_path + config.model_name)
    check_point = ModelCheckpoint(filepath=config.model_save_path + config.model_name,
                                  monitor='val_loss', save_best_only=True, mode='auto')
    early_stop = EarlyStopping(monitor='val_loss', patience=config.patience, mode='auto')
    model.fit(train_X, train_Y, batch_size=config.batch_size, epochs=config.epoch, verbose=2,
              validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])


def predict(config, test_X):
    model = get_keras_model(config)
    model.load_weights(config.model_save_path + config.model_name)
    result = model.predict(test_X, batch_size=1)
    result = result.reshape((-1, config.output_size))  # merge (samples, time_step) axes
    return result
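
All three backends read their hyperparameters from a single config object whose class is not part of this diff. The sketch below is a hypothetical reconstruction, inferred from the attributes the code accesses; the field names match the code, but every default value is illustrative only.

# Hypothetical Config inferred from the attributes used in this commit;
# field names come from the code, values are placeholders.
class Config:
    time_step = 20             # length of each input sequence
    input_size = 8             # features per time step
    output_size = 1            # predicted values per time step
    hidden_size = 128          # LSTM hidden units
    lstm_layers = 2            # stacked LSTM layers
    dropout_rate = 0.2
    batch_size = 64
    epoch = 20
    learning_rate = 0.001
    patience = 5               # early-stopping patience
    add_train = False          # resume training from saved weights
    do_continue_train = False  # carry hidden state across batches (PyTorch path)
    model_save_path = "checkpoint/"
    model_name = "model.h5"    # weights file, e.g. for the Keras version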

98 model/model_pytorch.py Normal file

@@ -0,0 +1,98 @@
import torch
from torch.nn import Module, LSTM, Linear
from torch.utils.data import DataLoader, TensorDataset
import numpy as np


class Net(Module):
    def __init__(self, config):
        super(Net, self).__init__()
        self.lstm = LSTM(input_size=config.input_size, hidden_size=config.hidden_size,
                         num_layers=config.lstm_layers, batch_first=True,
                         dropout=config.dropout_rate)
        self.linear = Linear(in_features=config.hidden_size, out_features=config.output_size)

    def forward(self, x, hidden=None):
        lstm_out, hidden = self.lstm(x, hidden)
        linear_out = self.linear(lstm_out)  # apply the head at every time step
        return linear_out, hidden


def train(config, train_X, train_Y, valid_X, valid_Y):
    train_X, train_Y = torch.from_numpy(train_X).float(), torch.from_numpy(train_Y).float()
    train_loader = DataLoader(TensorDataset(train_X, train_Y), batch_size=config.batch_size)
    valid_X, valid_Y = torch.from_numpy(valid_X).float(), torch.from_numpy(valid_Y).float()
    valid_loader = DataLoader(TensorDataset(valid_X, valid_Y), batch_size=config.batch_size)

    model = Net(config)
    if config.add_train:  # resume from a previously saved checkpoint
        model.load_state_dict(torch.load(config.model_save_path + config.model_name))

    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    criterion = torch.nn.MSELoss()

    valid_loss_min = float("inf")
    bad_epoch = 0
    for epoch in range(config.epoch):
        print("Epoch {}/{}".format(epoch, config.epoch))
        model.train()
        train_loss_array = []
        hidden_train = None
        for i, _data in enumerate(train_loader):
            _train_X, _train_Y = _data
            optimizer.zero_grad()
            pred_Y, hidden_train = model(_train_X, hidden_train)
            if not config.do_continue_train:
                hidden_train = None  # fresh hidden state for every batch
            else:
                # Carry the hidden state across batches, but detach it so
                # gradients cannot flow back through earlier batches.
                h_0, c_0 = hidden_train
                h_0.detach_(), c_0.detach_()
                hidden_train = (h_0, c_0)
            loss = criterion(pred_Y, _train_Y)
            loss.backward()
            optimizer.step()
            train_loss_array.append(loss.item())

        model.eval()
        valid_loss_array = []
        hidden_valid = None
        with torch.no_grad():  # no gradients needed during validation
            for _valid_X, _valid_Y in valid_loader:
                pred_Y, hidden_valid = model(_valid_X, hidden_valid)
                if not config.do_continue_train:
                    hidden_valid = None
                loss = criterion(pred_Y, _valid_Y)
                valid_loss_array.append(loss.item())

        valid_loss_cur = np.mean(valid_loss_array)
        print("The train loss is {:.4f}.".format(np.mean(train_loss_array)),
              "The valid loss is {:.4f}.".format(valid_loss_cur))
        if valid_loss_cur < valid_loss_min:  # checkpoint the best model so far
            valid_loss_min = valid_loss_cur
            bad_epoch = 0
            torch.save(model.state_dict(), config.model_save_path + config.model_name)
        else:  # early stopping
            bad_epoch += 1
            if bad_epoch >= config.patience:
                print("The training stops early in epoch {}".format(epoch))
                break


def predict(config, test_X):
    test_X = torch.from_numpy(test_X).float()
    test_set = TensorDataset(test_X)
    test_loader = DataLoader(test_set, batch_size=1)

    model = Net(config)
    model.load_state_dict(torch.load(config.model_save_path + config.model_name))

    result = torch.Tensor()
    model.eval()
    hidden_predict = None
    with torch.no_grad():  # inference only
        for _data in test_loader:
            data_X = _data[0]
            pred_X, hidden_predict = model(data_X, hidden_predict)
            cur_pred = torch.squeeze(pred_X, dim=0)  # (time_step, output_size)
            result = torch.cat((result, cur_pred), dim=0)
    return result.detach().numpy()
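
The diff does not show how these entry points are invoked. Below is a minimal, hypothetical driver for the PyTorch version, assuming the Config sketch above; the arrays are random and exist only to demonstrate the (samples, time_step, features) layout the model expects.

# Hypothetical usage sketch for model_pytorch; data is random and only
# illustrates the expected shapes.
import numpy as np

config = Config()
config.model_name = "model.pth"  # a PyTorch state_dict this time

train_X = np.random.rand(500, config.time_step, config.input_size).astype("float32")
train_Y = np.random.rand(500, config.time_step, config.output_size).astype("float32")

train(config, train_X, train_Y, train_X[:100], train_Y[:100])
pred = predict(config, train_X[:100])  # shape: (100 * time_step, output_size)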

97 model/model_tensorflow.py Normal file

@@ -0,0 +1,97 @@
import tensorflow as tf
import numpy as np


class Model:
    def __init__(self, config):
        self.config = config
        self.placeholders()
        self.net()
        self.operate()

    def placeholders(self):
        self.X = tf.placeholder(tf.float32, [None, self.config.time_step, self.config.input_size])
        self.Y = tf.placeholder(tf.float32, [None, self.config.time_step, self.config.output_size])

    def net(self):
        def dropout_cell():
            basic_lstm = tf.nn.rnn_cell.LSTMCell(self.config.hidden_size)
            dropout_lstm = tf.nn.rnn_cell.DropoutWrapper(basic_lstm,
                                                         output_keep_prob=1 - self.config.dropout_rate)
            return dropout_lstm

        cell = tf.nn.rnn_cell.MultiRNNCell([dropout_cell() for _ in range(self.config.lstm_layers)])
        output_rnn, _ = tf.nn.dynamic_rnn(cell=cell, inputs=self.X, dtype=tf.float32)
        # shape of output_rnn: [batch_size, time_step, hidden_size]
        self.pred = tf.layers.dense(inputs=output_rnn, units=self.config.output_size)

    def operate(self):
        self.loss = tf.reduce_mean(tf.square(tf.reshape(self.pred, [-1]) - tf.reshape(self.Y, [-1])))
        self.optim = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver(tf.global_variables())


def train(config, train_X, train_Y, valid_X, valid_Y):
    with tf.variable_scope("stock_predict"):
        model = Model(config)

    train_len = len(train_X)
    valid_len = len(valid_X)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        valid_loss_min = float("inf")
        bad_epoch = 0
        for epoch in range(config.epoch):
            print("Epoch {}/{}".format(epoch, config.epoch))

            # Training
            train_loss_array = []
            for step in range(train_len // config.batch_size):
                feed_dict = {model.X: train_X[step * config.batch_size: (step + 1) * config.batch_size],
                             model.Y: train_Y[step * config.batch_size: (step + 1) * config.batch_size]}
                train_loss, _ = sess.run([model.loss, model.optim], feed_dict=feed_dict)
                train_loss_array.append(train_loss)

            # Validation and early stopping
            valid_loss_array = []
            for step in range(valid_len // config.batch_size):
                feed_dict = {model.X: valid_X[step * config.batch_size: (step + 1) * config.batch_size],
                             model.Y: valid_Y[step * config.batch_size: (step + 1) * config.batch_size]}
                valid_loss = sess.run(model.loss, feed_dict=feed_dict)
                valid_loss_array.append(valid_loss)

            valid_loss_cur = np.mean(valid_loss_array)
            print("The train loss is {:.4f}.".format(np.mean(train_loss_array)),
                  "The valid loss is {:.4f}.".format(valid_loss_cur))
            if valid_loss_cur < valid_loss_min:  # checkpoint the best model
                valid_loss_min = valid_loss_cur
                bad_epoch = 0
                path = model.saver.save(sess, config.model_save_path + config.model_name)
                print(path)
            else:
                bad_epoch += 1
                if bad_epoch >= config.patience:  # early stopping
                    print("The training stops early in epoch {}".format(epoch))
                    break


def predict(config, test_X):
    # Disable dropout at inference: keep_prob is computed as 1 - dropout_rate,
    # so the rate must be 0 here (a rate of 1 would zero out every activation).
    config.dropout_rate = 0
    with tf.variable_scope("stock_predict", reuse=tf.AUTO_REUSE):
        model = Model(config)

    test_len = len(test_X)
    with tf.Session() as sess:
        module_file = tf.train.latest_checkpoint(config.model_save_path)
        model.saver.restore(sess, module_file)
        result = np.zeros((test_len * config.time_step, config.output_size))
        for step in range(test_len):  # predict one sample at a time
            feed_dict = {model.X: test_X[step: (step + 1)]}
            test_pred = sess.run(model.pred, feed_dict=feed_dict)
            result[step * config.time_step: (step + 1) * config.time_step] = test_pred[0, :, :]
        return result
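
Because the TF 1.x graph is process-global, train and predict share the "stock_predict" variable scope. A hedged sketch of one way to drive this version, again assuming the hypothetical Config above; tf.reset_default_graph() avoids duplicate-variable errors when both functions run in one process.

# Hypothetical driver for model_tensorflow; shapes as in the PyTorch
# example. Resetting the default graph lets predict() rebuild the model
# cleanly after train() in the same process.
import numpy as np
import tensorflow as tf

config = Config()
config.model_name = "model.ckpt"  # checkpoint prefix for tf.train.Saver

train_X = np.random.rand(500, config.time_step, config.input_size)
train_Y = np.random.rand(500, config.time_step, config.output_size)

train(config, train_X, train_Y, train_X[:100], train_Y[:100])
tf.reset_default_graph()
result = predict(config, train_X[:100])  # (100 * time_step, output_size)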