
Model 1: a verbose walkthrough of the training code


"""n如何用keras訓練ai-challenger-stock的數據n**Author**: `https://github.com/EmbraceLife/shendusuipian`nn"""nnimport kerasnfrom keras.layers import Dense, LSTM, Activation, BatchNormalization, Dropout, SimpleRNN, Inputn# from renormalization import BatchRenormalizationnfrom keras.layers.advanced_activations import LeakyReLUnfrom keras.models import Sequentialnfrom keras.optimizers import SGD, RMSprop, Adamnfrom keras.models import load_modelnfrom keras.callbacks import TensorBoard, ModelCheckpointnimport bcolznimport numpy as npnn############## load 不同處理方式生成的 2-d 數據 ##########################################n######################################################################################n# train_dev_test_no_norm_no_group_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/no_norm_no_group"n# train_feature_array, dev_feature_array, test_feature_array = bcolz.open(train_dev_test_no_norm_no_group_path)nn# train_dev_test_stad_no_group_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/features_stad_no_group"n# train_feature_stad_array, dev_feature_stad_array, test_feature_stad_array = bcolz.open(train_dev_test_stad_no_group_path)n#n# train_dev_test_mimax_no_group_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/features_mimax_no_group"n# train_feature_mimax_array, dev_feature_mimax_array, test_feature_mimax_array = bcolz.open(train_dev_test_mimax_no_group_path)n#n# train_dev_test_group_no_norm_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/group_no_norm/"n# train_feature_group_array, dev_feature_group_array, test_feature_group_array = bcolz.open(train_dev_test_group_no_norm_path)n#n# train_dev_test_features_group_norm_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/features_group_norm/"n# train_feature_group_norm_array, dev_feature_group_norm_array, test_feature_group_norm_array = bcolz.open(train_dev_test_features_group_norm_path)n#ntrain_dev_test_sequence_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/train_dev_test_sequence.npz"nnnpzfiles = np.load(train_dev_test_sequence_path)ntrain_feature_stad_group_norm_array = npzfiles[train_sequence]ndev_feature_stad_group_norm_array = npzfiles[dev_sequence]ntest_feature_stad_group_norm_array = npzfiles[test_sequence]nntrain_dev_target_weight_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/target_weight_norm_dir/"ntrain_target_array, dev_target_array, train_weight_array, dev_weight_array = bcolz.open(train_dev_target_weight_path)nnprint("dataset loaded")n############################################################################################################nnn############## 將 2-d 數據 轉化為 3-d 數據, 周期為1 ##########################################n###########################################################################################n# train_feature_group_norm_array = train_feature_group_norm_array.reshape((-1, 1, 89))n# dev_feature_group_norm_array = dev_feature_group_norm_array.reshape((-1, 1, 89))n# train_feature_stad_group_norm_array = train_feature_stad_group_norm_array.reshape((-1, 1, 89))n# dev_feature_stad_group_norm_array = dev_feature_stad_group_norm_array.reshape((-1, 1, 89))n###########################################################################################nnn############## load 3-d 數據, 周期為5 ######################################################n###########################################################################################n# train_dev_test_sequence_path = 
"/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/features_stad_group_stad/"n# train_feature_stad_group_norm_array, dev_feature_stad_group_norm_array, test_feature_stad_group_norm_array = bcolz.open(train_dev_test_sequence_path)n#n# train_dev_target_weight_sequence_path = "/Users/Natsume/Documents/AI-challenger-stocks/prepared_dataset/target_weight_norm/"n# train_target_array, dev_target_array, train_weight_array, dev_weight_array = bcolz.open(train_dev_target_weight_sequence_path)n##########################################################################################nnn############## 調超參數 ###########n##################################n# no standardization to training set (no need, 0, 1)nn_neurons = 10n# n_neurons0=10nn_neurons1 = 8nn_neurons2 = 6nn_neurons3 = 4nlr = 0.00001 # 0.001 # 0.01 # 0.1nrate_dropout = 0. # 0.5 # 0.3 # 0.4 # 0.7 # 0.5, 0.3n# 選用不同處理的 樣本權重n# train_weight_flat = Nonen# dev_weight_flat = Nonentrain_weight_flat = train_weight_array.flatten() # 使用weight,mimaxndev_weight_flat = dev_weight_array.flatten() # stad 有負數,計算損失函數時會出問題nn##################################nnnnn############## 構建模型 ######################################################n#############################################################################nmodel = Sequential()nn# 在輸入層後增加拋棄層 dropoutnmodel.add(Dropout(rate=rate_dropout, input_shape=(88,)))n# model.add(BatchNormalization())n# 增加一個簡單的RNN層n# model.add(SimpleRNN( # simple RNNn# # for batch_input_shape, if using tensorflow as the backend, we have to put None for the batch_size.n# # Otherwise, model.evaluate() will get error.n# batch_input_shape=(None, 1, 89), # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,n# output_dim=10,n# unroll=True,n# ))nn# 在輸入層後增加簡單的 dense 層n# 需要對 輸入層 做說明 input_shape = (88,), 如果訓練數據維度是(樣本數,88特徵值), 88 是因為沒有加上『group』n# model.add(Dense(n_neurons))#, input_shape=(88,))) # 88, 89nmodel.add(Dense(n_neurons))nmodel.add(BatchNormalization())n# 為該 dense層 選用不同的 激勵函數nmodel.add(Activation(relu))n# model.add(LeakyReLU(alpha=0.3))n# model.add(Activation(tanh))nnn# 對輸入層做 拋棄層 處理,針對訓練數據 維度 (樣本,5, 89), 時間周期 5, 特徵數 89n# model.add(Dropout(rate=rate_dropout, input_shape=(5, 89)))nn# 在輸入層後面,增加一個LSTM 層n# model.add(LSTM(2, # 本層神經元個數n# input_shape=(1, 89), # 處理輸入層的輸出個數n# return_sequences=False, # 每個time_step(共30個time_step周期)都要產生一個輸出值n# activation=tanh, # 針對LSTM; 這些默認值都是認為普適性最強的值n# recurrent_activation=hard_sigmoid, # 針對recurrentn# kernel_initializer=glorot_uniform,n# recurrent_initializer=orthogonal,n# bias_initializer=zeros,n# dropout=rate_dropout, # 針對LSTM層,扔掉輸入層神經元個數n# recurrent_dropout=rate_dropout # 針對recurrent 扔掉循環層神經元個數n# tttt)) # 上述都只有字面理解,真正裡面發生了什麼,至少需要上完吳恩達RNN課程(類似功課)n# 對每一個訓練值模塊進行標準化n# model.add(Dense(n_neurons0))n# model.add(Activation(relu))nnmodel.add(Dense(n_neurons1))nmodel.add(BatchNormalization())nmodel.add(Activation(relu))nn# 增加第二隱藏層,以及激勵函數nmodel.add(Dense(n_neurons2))nmodel.add(BatchNormalization())nmodel.add(Activation(relu))n# model.add(LeakyReLU(alpha=0.3))n# model.add(Activation(tanh))nnn# 增加第三隱藏層nmodel.add(Dense(n_neurons3))nmodel.add(BatchNormalization())nmodel.add(Activation(relu))n# model.add(LeakyReLU(alpha=0.3))n# model.add(Activation(tanh))nnnn# 增加輸出層,和激勵函數nmodel.add(Dense(1)) # 多分類問題,神經元 3 或更多nmodel.add(Activation(sigmoid)) # 『softmax』n##################################################################nnn###################### 設置優化演算法,損失函數,metric######################n#######################################################################n# learning_rate, lr, 首選 0.001 默認值n# try learning rate decay: 
0.1, default 0.0nopt = Adam(lr=lr)n# metrics 這裡選用 accuracy 或者 binary_accuracynmodel.compile(optimizer=opt,n loss=binary_crossentropy, # categorical_crossentropyn metrics=[binary_accuracy]) # categorical_accuracynn# 列印模型結構nmodel.summary()n#######################################################################nnn####################### 模型訓練 以及重要數據保存 ##############################n############################################################################nnnn# 儲存可視化文件, 最優模型參數文件 的地址nlog_dir = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/logs"n# model_file = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/best.h5"nmodel_file="/Users/Natsume/Documents/AI-challenger-stocks/model_output/best.{epoch:02d}-{val_loss:.4f}.hdf5" # 記錄依次產生的最優損失值的模型,以及產生於在哪一次訓練nnn# 選用不同處理方式的 特徵值, 目標值n# train_set_features = train_feature_arrayn# train_set_features = train_feature_group_norm_array # features 不標準化,group 標準化ntrain_set_features = train_feature_stad_group_norm_array # features 標準化,group 標準化ntrain_set_target = train_target_arrayn# dev_set_features = dev_feature_arrayn# dev_set_features = dev_feature_group_norm_array # features 不標準化,group 標準化ndev_set_features = dev_feature_stad_group_norm_array # # features 標準化,group 標準化ndev_set_target = dev_target_arraynn# 開始訓練,n# history = model.fit(...) 訓練完畢後,將loss 和 accuracy 保存下來nhistory = model.fit(train_set_features, # x: 訓練特徵值n train_set_target, # y: 訓練目標值n batch_size=1024, # 一次性使用多少個樣本一起計算n epochs=300, # 訓練次數n verbose=1, # 是否列印每次訓練的損失值和準確度nn # validation_split=0.2, # 從訓練數據集中取多少作為驗證數據 0.2,就是取剩下的20%作為驗證n validation_data=(dev_set_features, dev_set_target, dev_weight_flat), # 或者另外使用獨立的驗證數據,None 是沒有另外數據; 如果dev set 也有權重,添加在這裡n shuffle=True, # 是否每次訓練前打亂樣本順序n class_weight=None, # 目標值的權重設置n sample_weight=train_weight_flat, # 樣本的權重設置,必須是1-d的維度n initial_epoch=0, # 從第幾個訓練開始訓練nttt# 是否使用其他預處理功能n callbacks=[TensorBoard(histogram_freq=1,n log_dir=log_dir), # 畫圖保存n ModelCheckpoint(filepath=model_file, save_best_only=True, mode=min)]) # 訓練時保存最優秀的模型,並非最後一輪訓練的模型版本nn# 將最後一次訓練的模型保存下來nmodel_last_file = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/last.h5"nmodel.save(model_last_file)n###############################################################################################nnn#################### 保存不同模型和訓練出來的 loss, accuracy ######################################n###############################################################################################nhistory.history.keys()nloss = history.history[loss]nval_loss = history.history[val_loss]nacc = history.history[binary_accuracy]nval_acc = history.history[val_binary_accuracy]nnlosses_file = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/losses.npy"nimport os.pathnnnlosses_accss = {}nif os.path.isfile(losses_file):n losses_accss = np.load(losses_file).tolist() # 變成dictnnnloss_name = "my_loss" # loss_weight_orig, val_loss_orig, loss_weight_stad, val_loss_weight_stadnval_loss_name = "val_loss"nacc_name = my_accnval_acc_name = val_accnnlosses_accss[loss_name] = lossnlosses_accss[val_loss_name] = val_lossnlosses_accss[acc_name] = accnlosses_accss[val_acc_name] = val_accnnp.save(losses_file, losses_accss)nnprint("-----------------training model_no_weight-----------------")n## display logn# 模型1: 1 hidden 2 neurons, lr = 0.001, 訓練驗證集無標準化,無group, weight 無標準化n# tensorboard --logdir "/Users/Natsume/Documents/AI-challenger-stocks/model_output/logs"n
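The script above leaves three artifacts on disk: the per-epoch best checkpoints (`best.{epoch:02d}-{val_loss:.4f}.hdf5`), the last-epoch model (`last.h5`), and the accumulated loss/accuracy dict (`losses.npy`). Below is a minimal sketch of reloading them in the same session, assuming `dev_set_features`, `dev_set_target` and `dev_weight_flat` from the script are still in scope; the matplotlib plotting at the end is my own illustration, not part of the original script, and the actual best-checkpoint filename depends on which epoch won, so check `model_output/` for it.

# minimal sketch: reload the saved artifacts and compare runs
import numpy as np
from keras.models import load_model

model_last_file = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/last.h5"
losses_file = "/Users/Natsume/Documents/AI-challenger-stocks/model_output/losses.npy"

# reload the last-epoch model and evaluate it on the dev set,
# using the same sample weights the training run used
model = load_model(model_last_file)
dev_loss, dev_acc = model.evaluate(dev_set_features, dev_set_target,
                                   sample_weight=dev_weight_flat, verbose=0)
print("dev loss: %.4f, dev binary_accuracy: %.4f" % (dev_loss, dev_acc))

# reload the accumulated curves (np.save stored the dict as a 0-d object
# array, so .tolist() recovers the dict, mirroring the script above)
losses_accss = np.load(losses_file).tolist()

# plot train vs. validation loss (illustration only)
import matplotlib.pyplot as plt
plt.plot(losses_accss["my_loss"], label="train loss")
plt.plot(losses_accss["val_loss"], label="val loss")
plt.legend()
plt.show()

Because `losses.npy` keeps one entry per experiment name (e.g. `loss_weight_orig` vs. `loss_weight_stad`), rerunning the training script with a different `loss_name` lets you overlay several runs on the same plot.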
