mirror of https://github.com/newnius/YAO-optimizer.git synced 2025-06-06 06:41:55 +00:00

add files

This commit is contained in:
Newnius 2020-04-29 22:18:18 +08:00
parent 96eba1f850
commit e41f541f3b
18 changed files with 170 additions and 420 deletions

View File

@@ -3,6 +3,17 @@
<component name="ChangeListManager">
<list default="true" id="0aedafd8-e57e-462a-beda-65af0b91f3df" name="Default Changelist" comment="">
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/figure/continue_predict_high_with_pytorch.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/continue_predict_low_with_pytorch.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_high_with_keras.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_high_with_pytorch.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_high_with_tensorflow.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_low_with_keras.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_low_with_pytorch.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/figure/predict_low_with_tensorflow.png" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/main.py" beforeDir="false" afterPath="$PROJECT_DIR$/main.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/serve.py" beforeDir="false" afterPath="$PROJECT_DIR$/serve.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/test.py" beforeDir="false" />
</list>
<ignored path="$PROJECT_DIR$/out/" />
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
@@ -23,35 +34,37 @@
<counts>
<entry key="csv" value="1" />
<entry key="iml" value="1" />
<entry key="py" value="4" />
<entry key="png" value="7" />
<entry key="py" value="6" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.types.open">
<counts>
<entry key="IDEA_MODULE" value="1" />
<entry key="Image" value="7" />
<entry key="PLAIN_TEXT" value="1" />
<entry key="Python" value="4" />
<entry key="Python" value="6" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.extensions.edit">
<counts>
<entry key="py" value="333" />
<entry key="py" value="522" />
</counts>
</usages-collector>
<usages-collector id="statistics.file.types.edit">
<counts>
<entry key="Python" value="333" />
<entry key="Python" value="522" />
</counts>
</usages-collector>
</session>
</component>
<component name="FileEditorManager">
<leaf>
<file pinned="false" current-in-tab="true">
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/main.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="123">
<caret line="11" column="23" lean-forward="true" selection-start-line="11" selection-start-column="23" selection-end-line="11" selection-end-column="23" />
<state relative-caret-position="1408">
<caret line="161" column="25" lean-forward="true" selection-start-line="161" selection-start-column="25" selection-end-line="161" selection-end-column="25" />
<folding>
<element signature="e#0#19#0" expanded="true" />
</folding>
@@ -59,11 +72,24 @@
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/serve.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="283">
<caret line="34" column="43" lean-forward="true" selection-start-line="34" selection-start-column="43" selection-end-line="34" selection-end-column="43" />
<folding>
<element signature="e#18#46#0" expanded="true" />
<marker date="1588169420688" expanded="true" signature="237:273" ph="..." />
</folding>
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/model/model_tensorflow.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="1410">
<caret line="94" column="20" selection-start-line="94" selection-start-column="20" selection-end-line="94" selection-end-column="20" />
<state relative-caret-position="105">
<caret line="7" lean-forward="true" selection-start-line="7" selection-end-line="7" />
<folding>
<element signature="e#0#23#0" expanded="true" />
</folding>
@@ -71,15 +97,6 @@
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/data/stock_data.csv">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="135">
<caret line="9" column="12" selection-start-line="9" selection-start-column="12" selection-end-line="9" selection-end-column="12" />
</state>
</provider>
</entry>
</file>
</leaf>
</component>
<component name="FileTemplateManagerImpl">
@@ -101,9 +118,9 @@
<option name="CHANGED_PATHS">
<list>
<option value="$PROJECT_DIR$/test.py" />
<option value="$PROJECT_DIR$/serve.py" />
<option value="$PROJECT_DIR$/model/model_tensorflow.py" />
<option value="$PROJECT_DIR$/main.py" />
<option value="$PROJECT_DIR$/serve.py" />
</list>
</option>
</component>
@@ -113,10 +130,9 @@
<detection-done>true</detection-done>
<sorting>DEFINITION_ORDER</sorting>
</component>
<component name="ProjectFrameBounds" extendedState="6">
<option name="y" value="23" />
<component name="ProjectFrameBounds" fullScreen="true">
<option name="width" value="1280" />
<option name="height" value="704" />
<option name="height" value="800" />
</component>
<component name="ProjectView">
<navigator proportions="" version="1">
@@ -137,11 +153,6 @@
<item name="yao-optimizer" type="462c0819:PsiDirectoryNode" />
<item name="data" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="yao-optimizer" type="b2602c69:ProjectViewProjectNode" />
<item name="yao-optimizer" type="462c0819:PsiDirectoryNode" />
<item name="figure" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="yao-optimizer" type="b2602c69:ProjectViewProjectNode" />
<item name="yao-optimizer" type="462c0819:PsiDirectoryNode" />
@@ -157,7 +168,7 @@
<component name="PropertiesComponent">
<property name="WebServerToolWindowFactoryState" value="false" />
<property name="aspect.path.notification.shown" value="true" />
<property name="com.android.tools.idea.instantapp.provision.ProvisionBeforeRunTaskProvider.myTimeStamp" value="1588158359232" />
<property name="com.android.tools.idea.instantapp.provision.ProvisionBeforeRunTaskProvider.myTimeStamp" value="1588169466518" />
<property name="go.gopath.indexing.explicitly.defined" value="true" />
<property name="nodejs_interpreter_path.stuck_in_default_project" value="undefined stuck path" />
<property name="nodejs_npm_path_reset_for_default_project" value="true" />
@@ -188,15 +199,15 @@
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1588152877746</updated>
<workItem from="1588152880522" duration="4816000" />
<workItem from="1588152880522" duration="7941000" />
</task>
<servers />
</component>
<component name="TimeTrackingManager">
<option name="totallyTimeSpent" value="4816000" />
<option name="totallyTimeSpent" value="7941000" />
</component>
<component name="ToolWindowManager">
<frame x="0" y="23" width="1280" height="704" extended-state="6" />
<frame x="0" y="0" width="1280" height="800" extended-state="0" />
<editor active="true" />
<layout>
<window_info id="Designer" order="0" />
@@ -204,10 +215,10 @@
<window_info id="Capture Tool" order="2" />
<window_info id="Favorites" order="3" side_tool="true" />
<window_info id="Image Layers" order="4" />
<window_info content_ui="combo" id="Project" order="5" visible="true" weight="0.24959612" />
<window_info active="true" content_ui="combo" id="Project" order="5" visible="true" weight="0.24959612" />
<window_info id="Structure" order="6" side_tool="true" weight="0.25" />
<window_info anchor="bottom" id="Version Control" order="0" />
<window_info active="true" anchor="bottom" id="Terminal" order="1" visible="true" weight="0.32843137" />
<window_info anchor="bottom" id="Terminal" order="1" weight="0.32843137" />
<window_info anchor="bottom" id="Event Log" order="2" side_tool="true" />
<window_info anchor="bottom" id="Database Changes" order="3" show_stripe_button="false" />
<window_info anchor="bottom" id="Docker" order="4" show_stripe_button="false" />
@@ -240,6 +251,23 @@
<entry file="file://$PROJECT_DIR$/yao-optimizer.iml">
<provider selected="true" editor-type-id="text-editor" />
</entry>
<entry file="file://$PROJECT_DIR$/data/stock_data.csv">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="105">
<caret line="7" column="33" selection-start-line="7" selection-start-column="33" selection-end-line="7" selection-end-column="33" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/model/model_tensorflow.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="105">
<caret line="7" lean-forward="true" selection-start-line="7" selection-end-line="7" />
<folding>
<element signature="e#0#23#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/test.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="150">
@@ -250,43 +278,48 @@
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/serve.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="161">
<caret line="63" column="29" lean-forward="true" selection-start-line="63" selection-start-column="29" selection-end-line="63" selection-end-column="29" />
<folding>
<element signature="e#0#23#0" expanded="true" />
</folding>
</state>
</provider>
<entry file="file://$PROJECT_DIR$/figure/continue_predict_high_with_pytorch.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/data/stock_data.csv">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="135">
<caret line="9" column="12" selection-start-line="9" selection-start-column="12" selection-end-line="9" selection-end-column="12" />
</state>
</provider>
<entry file="file://$PROJECT_DIR$/figure/continue_predict_low_with_pytorch.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/model/model_tensorflow.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="1410">
<caret line="94" column="20" selection-start-line="94" selection-start-column="20" selection-end-line="94" selection-end-column="20" />
<folding>
<element signature="e#0#23#0" expanded="true" />
</folding>
</state>
</provider>
<entry file="file://$PROJECT_DIR$/figure/predict_high_with_keras.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/figure/predict_low_with_tensorflow.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/figure/predict_low_with_keras.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/figure/predict_high_with_tensorflow.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/figure/predict_high_with_pytorch.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/main.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="123">
<caret line="11" column="23" lean-forward="true" selection-start-line="11" selection-start-column="23" selection-end-line="11" selection-end-column="23" />
<state relative-caret-position="1408">
<caret line="161" column="25" lean-forward="true" selection-start-line="161" selection-start-column="25" selection-end-line="161" selection-end-column="25" />
<folding>
<element signature="e#0#19#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/serve.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="283">
<caret line="34" column="43" lean-forward="true" selection-start-line="34" selection-start-column="43" selection-end-line="34" selection-end-column="43" />
<folding>
<element signature="e#18#46#0" expanded="true" />
<marker date="1588169420688" expanded="true" signature="237:273" ph="..." />
</folding>
</state>
</provider>
</entry>
</component>
<component name="masterDetails">
<states>

View File

@@ -1,21 +0,0 @@
## Predict stock with LSTM
This project provides LSTM-based training and prediction for stock data. Its main characteristics are:
- Concise and modular
- Supports three mainstream deep learning frameworks: PyTorch, Keras, and TensorFlow
- Parameters, models, and frameworks can be highly customized and modified
- Supports incremental training
- Supports predicting multiple indicators at the same time
- Supports predicting any number of days into the future
A Chinese introduction is available at: <https://blog.csdn.net/songyunli1111/article/details/78513811>
The simultaneous prediction results for the stock high and low prices with PyTorch are shown below:
![predict_high_with_pytorch](https://github.com/hichenway/stock_predict_with_LSTM/blob/master/figure/predict_high_with_pytorch.png)
![predict_low_with_pytorch](https://github.com/hichenway/stock_predict_with_LSTM/blob/master/figure/predict_low_with_pytorch.png)
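For context, a hedged sketch of the framework switch this README describes. The dispatch below is illustrative rather than a verbatim copy of main.py, but the shared train/predict signatures match the model modules in this repository:

# Illustrative backend selection: each model module exposes the same interface,
#   train(config, train_X, train_Y, valid_X, valid_Y) and predict(config, test_X),
# so the rest of the pipeline stays framework-agnostic.
frame = "tensorflow"  # or "pytorch" / "keras"
if frame == "tensorflow":
    from model.model_tensorflow import train, predict
elif frame == "pytorch":
    from model.model_pytorch import train, predict
elif frame == "keras":
    from model.model_keras import train, predict
else:
    raise ValueError("unsupported framework: " + frame)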

Binary files not shown: 8 deleted PNG figures under figure/ (39-47 KiB each).

main.py (21 changed lines)
View File

@@ -4,8 +4,7 @@ import os
from sklearn.model_selection import train_test_split
frame = "tensorflow"
from model.model_tensorflow import train, predict
from model_tensorflow import train, predict
class Config:
feature_columns = list([2,5])
@@ -139,24 +138,6 @@ def draw(config, origin_data, predict_norm_data):
print(label_data)
print(predict_data)
'''
for i in range(label_column_num):
plt.figure(i + 1)
plt.plot(label_X, label_data[:, i], label='label')
plt.plot(predict_X, predict_data[:, i], label='predict')
plt.legend(loc='upper right')
plt.xlabel("Day")
plt.ylabel("Price")
plt.title("Predict stock {} price with {}".format(label_name[i], config.used_frame))
print("The predicted stock {} for the next {} day(s) is: ".format(label_name[i], config.predict_day),
np.squeeze(predict_data[-config.predict_day:, i]))
if config.do_figure_save:
plt.savefig(config.figure_save_path + "{}predict_{}_with_{}.png".format(config.continue_flag, label_name[i],
config.used_frame))
plt.show()
'''
def main(config):
np.random.seed(config.random_seed)

View File

View File

@@ -1,35 +0,0 @@
from keras.layers import Input, Dense, LSTM
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
def get_keras_model(config):
input1 = Input(shape=(config.time_step, config.input_size))
lstm = input1
for i in range(config.lstm_layers):
lstm = LSTM(units=config.hidden_size, dropout=config.dropout_rate, return_sequences=True)(lstm)
output = Dense(config.output_size)(lstm)
model = Model(input1, output)
model.compile(loss='mse', optimizer='adam')
return model
def train(config, train_X, train_Y, valid_X, valid_Y):
model = get_keras_model(config)
model.summary()
if config.add_train:
model.load_weights(config.model_save_path + config.model_name)
check_point = ModelCheckpoint(filepath=config.model_save_path + config.model_name, monitor='val_loss',
save_best_only=True, mode='auto')
early_stop = EarlyStopping(monitor='val_loss', patience=config.patience, mode='auto')
model.fit(train_X, train_Y, batch_size=config.batch_size, epochs=config.epoch, verbose=2,
validation_data=(valid_X, valid_Y), callbacks=[check_point, early_stop])
def predict(config, test_X):
model = get_keras_model(config)
model.load_weights(config.model_save_path + config.model_name)
result = model.predict(test_X, batch_size=1)
result = result.reshape((-1, config.output_size))
return result

View File

@@ -1,98 +0,0 @@
import torch
from torch.nn import Module, LSTM, Linear
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
class Net(Module):
def __init__(self, config):
super(Net, self).__init__()
self.lstm = LSTM(input_size=config.input_size, hidden_size=config.hidden_size,
num_layers=config.lstm_layers, batch_first=True, dropout=config.dropout_rate)
self.linear = Linear(in_features=config.hidden_size, out_features=config.output_size)
def forward(self, x, hidden=None):
lstm_out, hidden = self.lstm(x, hidden)
linear_out = self.linear(lstm_out)
return linear_out, hidden
def train(config, train_X, train_Y, valid_X, valid_Y):
train_X, train_Y = torch.from_numpy(train_X).float(), torch.from_numpy(train_Y).float()
train_loader = DataLoader(TensorDataset(train_X, train_Y), batch_size=config.batch_size)
valid_X, valid_Y = torch.from_numpy(valid_X).float(), torch.from_numpy(valid_Y).float()
valid_loader = DataLoader(TensorDataset(valid_X, valid_Y), batch_size=config.batch_size)
model = Net(config)
if config.add_train:
model.load_state_dict(torch.load(config.model_save_path + config.model_name))
optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
criterion = torch.nn.MSELoss()
valid_loss_min = float("inf")
bad_epoch = 0
for epoch in range(config.epoch):
print("Epoch {}/{}".format(epoch, config.epoch))
model.train()
train_loss_array = []
hidden_train = None
for i, _data in enumerate(train_loader):
_train_X, _train_Y = _data
optimizer.zero_grad()
pred_Y, hidden_train = model(_train_X, hidden_train)
if not config.do_continue_train:
hidden_train = None
else:
h_0, c_0 = hidden_train
h_0.detach_(), c_0.detach_()
hidden_train = (h_0, c_0)
loss = criterion(pred_Y, _train_Y)
loss.backward()
optimizer.step()
train_loss_array.append(loss.item())
model.eval()
valid_loss_array = []
hidden_valid = None
for _valid_X, _valid_Y in valid_loader:
pred_Y, hidden_valid = model(_valid_X, hidden_valid)
if not config.do_continue_train: hidden_valid = None
loss = criterion(pred_Y, _valid_Y)
valid_loss_array.append(loss.item())
valid_loss_cur = np.mean(valid_loss_array)
print("The train loss is {:.4f}. ".format(np.mean(train_loss_array)),
"The valid loss is {:.4f}.".format(valid_loss_cur))
if valid_loss_cur < valid_loss_min:
valid_loss_min = valid_loss_cur
bad_epoch = 0
torch.save(model.state_dict(), config.model_save_path + config.model_name)
else:
bad_epoch += 1
if bad_epoch >= config.patience:
print(" The training stops early in epoch {}".format(epoch))
break
def predict(config, test_X):
test_X = torch.from_numpy(test_X).float()
test_set = TensorDataset(test_X)
test_loader = DataLoader(test_set, batch_size=1)
model = Net(config)
model.load_state_dict(torch.load(config.model_save_path + config.model_name))
result = torch.Tensor()
model.eval()
hidden_predict = None
for _data in test_loader:
data_X = _data[0]
pred_X, hidden_predict = model(data_X, hidden_predict)
cur_pred = torch.squeeze(pred_X, dim=0)
result = torch.cat((result, cur_pred), dim=0)
return result.detach().numpy()

View File

@@ -6,4 +6,3 @@ tensorflow==1.15.2
matplotlib>=3.0.2
numpy>=1.14.6
scipy>=1.1.0
torch>=1.1.0

serve.py (156 changed lines)
View File

@@ -1,87 +1,89 @@
import tensorflow as tf
import numpy as np
import tushare as ts
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#!/usr/bin/python
from threading import Thread
from threading import Lock
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
import json
from urllib import parse
timesteps = seq_length = 7
data_dim = 5
output_dim = 1
PORT_NUMBER = 8000
stock_data = ts.get_k_data('600000', start='2015-01-01', end='2017-12-01')
xy = stock_data[['open', 'close', 'high', 'low', 'volume']]
# xy_new = pd.DataFrame()
# scaler = MinMaxScaler()
# scaler.fit(xy)
# t = scaler.transform(xy)
# for col in xy.columns:
# xy_new.ix[:, col] = t[col]
x = xy
y = xy[['close']]
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
_x = x[i:i + seq_length]
_y = y.loc[i + seq_length]
#print(_x, "->", _y)
dataX.append(_x)
dataY.append(_y)
x_real = np.vstack(dataX).reshape(-1, seq_length, data_dim)
y_real = np.vstack(dataY).reshape(-1, output_dim)
print(x_real.shape)
print(y_real.shape)
dataX = x_real
dataY = y_real
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size:len(dataX)])
trainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size:len(dataY)])
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, 1])
lock = Lock()
def add_layer(inputs, in_size, out_size, activation_function=None):
inputs = tf.reshape(inputs, [-1, in_size])
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
def train_models(job, records):
# stub: receives the job name and its records from do_POST; training itself is not implemented yet,
# the module-level lock only serializes concurrent training requests
lock.acquire()
lock.release()
class MyHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
req = parse.urlparse(self.path)
query = parse.parse_qs(req.query)
if req.path == "/ping":
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes("pong", "utf-8"))
elif req.path == "/predict":
try:
job = query.get('job')[0]
gpu_model = query.get('gpu_model')[0]
time = query.get('time')[0]
msg = {'code': 1, 'error': "container not exist"}
except Exception as e:
msg = {'code': 2, 'error': str(e)}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(msg), "utf-8"))
else:
outputs = activation_function(Wx_plus_b)
return outputs
self.send_error(404, 'File Not Found: %s' % self.path)
# Handler for the POST requests
def do_POST(self):
if self.path == "/train":
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
try:
job = form.getvalue('job')
data = form.getvalue('records')
records = json.loads(data)  # records arrives as a JSON-encoded string
t = Thread(target=train_models, name='train_models', args=(job, records,))
t.start()
msg = {"code": 0, "error": ""}
except Exception as e:
msg = {"code": 1, "error": str(e)}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(msg), "utf-8"))
else:
self.send_error(404, 'File Not Found: %s' % self.path)
outsize_first = 5
l1 = add_layer(X, data_dim, outsize_first, activation_function=tf.nn.relu)
l1_output = tf.reshape(l1, [-1, seq_length, outsize_first])
if __name__ == '__main__':
try:
# Create a web server and define the handler to manage the
# incoming request
server = HTTPServer(('', PORT_NUMBER), MyHandler)
print('Started http server on port ', PORT_NUMBER)
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=output_dim, state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(cell, l1_output, dtype=tf.float32)
Y_pred = outputs[:, -1]
# Wait forever for incoming http requests
server.serve_forever()
loss = tf.reduce_sum(tf.square(Y_pred - Y))
except KeyboardInterrupt:
print('^C received, shutting down the web server')
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(100):
_, l = sess.run(
[train, loss],
feed_dict={X: trainX, Y: trainY}
)
#print(i, l)
testPredict = sess.run(Y_pred, feed_dict={X: testX})
print(testY)
print(testPredict)
server.socket.close()
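For reference, a hedged client-side sketch of the HTTP API the new serve.py exposes (GET /ping, GET /predict and POST /train on port 8000). The host, job name and records payload below are illustrative assumptions, not values taken from this repository:

# Hypothetical client for the endpoints above; only the routes, query parameters and
# form fields come from serve.py, the concrete values are made up for illustration.
import json
from urllib import request, parse

base = "http://localhost:8000"

# GET /ping answers with the plain text "pong".
print(request.urlopen(base + "/ping").read().decode())

# GET /predict expects job, gpu_model and time as query parameters and returns JSON.
qs = parse.urlencode({"job": "job-1", "gpu_model": "K80", "time": "60"})
print(json.loads(request.urlopen(base + "/predict?" + qs).read()))

# POST /train expects form fields job and records (a JSON-encoded string) and
# returns {"code": 0, "error": ""} once the background training thread is started.
body = parse.urlencode({
    "job": "job-1",
    "records": json.dumps([{"time": 1, "utilization": 80}]),
}).encode()
print(json.loads(request.urlopen(request.Request(base + "/train", data=body)).read()))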

test.py (111 changed lines)
View File

@@ -1,111 +0,0 @@
import pandas as pd
import numpy as np
import tensorflow as tf
import tushare as ts
rnn_unit = 10
input_size = 7
output_size = 1
lr = 0.0006
stock_data = ts.get_k_data('600000', start='2015-01-01', end='2017-12-01')
data = stock_data.iloc[:, 2:10].values
# ------ Build the training set ------
def get_train_data(batch_size=60, time_step=20, train_begin=0, train_end=5800):
batch_index = []
data_train = data[train_begin:train_end]
normalized_train_data = (data_train - np.mean(data_train, axis=0)) / np.std(data_train, axis=0)  # standardize (z-score)
train_x, train_y = [], []  # initialize training x and y
for i in range(len(normalized_train_data) - time_step):
if i % batch_size == 0:
batch_index.append(i)
x = normalized_train_data[i:i + time_step, :7]
y = normalized_train_data[i:i + time_step, 7, np.newaxis]
train_x.append(x.tolist())
train_y.append(y.tolist())
batch_index.append((len(normalized_train_data) - time_step))
return batch_index, train_x, train_y
# ------ Build the test set ------
def get_test_data(time_step=20, test_begin=5800):
data_test = data[test_begin:]
mean = np.mean(data_test, axis=0)
std = np.std(data_test, axis=0)
normalized_test_data = (data_test - mean) / std  # standardize (z-score)
size = (len(normalized_test_data) + time_step - 1) // time_step  # number of samples
test_x, test_y = [], []
for i in range(size - 1):
x = normalized_test_data[i * time_step:(i + 1) * time_step, :7]
y = normalized_test_data[i * time_step:(i + 1) * time_step, 7]
test_x.append(x.tolist())
test_y.extend(y)
test_x.append((normalized_test_data[(i + 1) * time_step:, :7]).tolist())
test_y.extend((normalized_test_data[(i + 1) * time_step:, 7]).tolist())
return mean, std, test_x, test_y
# ------ Define the network ------
def lstm(X):
batch_size = tf.shape(X)[0]
time_step = tf.shape(X)[1]
w_in = weights['in']
b_in = biases['in']
input = tf.reshape(X, [-1, input_size])  # reshape the tensor to 2-D for the matmul; the result feeds the hidden layer
input_rnn = tf.matmul(input, w_in) + b_in
input_rnn = tf.reshape(input_rnn, [-1, time_step, rnn_unit])  # reshape back to 3-D as input to the LSTM cell
cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
output_rnn, final_states = tf.nn.dynamic_rnn(cell, input_rnn, initial_state=init_state,
dtype=tf.float32)  # output_rnn holds the LSTM output at every step; final_states is the state of the last cell
output = tf.reshape(output_rnn, [-1, rnn_unit])  # input to the output layer
w_out = weights['out']
b_out = biases['out']
pred = tf.matmul(output, w_out) + b_out
return pred, final_states
# ------ Train the model ------
def train_lstm(batch_size=80, time_step=15, train_begin=0, train_end=5800):
X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
Y = tf.placeholder(tf.float32, shape=[None, time_step, output_size])
batch_index, train_x, train_y = get_train_data(batch_size, time_step, train_begin, train_end)
pred, _ = lstm(X)
# loss function
loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=15)
module_file = tf.train.latest_checkpoint('.')  # latest_checkpoint needs a directory; '.' assumes the checkpoints saved by saver.save below live in the working directory
with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
saver.restore(sess, module_file)
# repeat training for 2000 iterations
for i in range(2000):
for step in range(len(batch_index) - 1):
_, loss_ = sess.run([train_op, loss], feed_dict={X: train_x[batch_index[step]:batch_index[step + 1]],
Y: train_y[batch_index[step]:batch_index[step + 1]]})
print(i, loss_)
if i % 200 == 0:
print("保存模型:", saver.save(sess, 'stock2.model', global_step=i))
# ------ Predict ------
def prediction(time_step=20):
X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
mean, std, test_x, test_y = get_test_data(time_step)
pred, _ = lstm(X)
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
# restore parameters
module_file = tf.train.latest_checkpoint('.')  # same assumption: checkpoints live in the working directory
saver.restore(sess, module_file)
test_predict = []
for step in range(len(test_x) - 1):
prob = sess.run(pred, feed_dict={X: [test_x[step]]})
predict = prob.reshape((-1))
test_predict.extend(predict)
test_y = np.array(test_y) * std[7] + mean[7]
test_predict = np.array(test_predict) * std[7] + mean[7]
acc = np.average(np.abs(test_predict - test_y[:len(test_predict)]) / test_y[:len(test_predict)])