NLP: TensorFlow Text-Price Modeling, Part 2

This follows directly from Part 1 of the NLP series (if you haven't read Part 1 yet, please go read it first). Here we use TensorFlow to build a base model that does not yet include any text features. First, the network diagram:

[Network architecture diagram]

A quick overview of the model:

  • Inputs: brand id, category id, one-hot encoded shipping/condition, and price

  • Two embedding layers, one for the brand-id feature and one for the category-id feature

  • Two dense layers with a similar structure

  • Output layer

  • Loss: MSE on log1p-transformed prices (see the sketch right after this list)

  • Optimizer: Adam
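
Because the batch generator below feeds log1p-transformed prices as the regression target, minimizing MSE is the same as minimizing the square of the RMSLE metric. A minimal illustration (the numbers are made up):

import numpy as np

# MSE on log1p(price) is the squared RMSLE, so the network is
# effectively trained on RMSLE (values here are illustrative only)
price = np.array([10.0, 35.0, 120.0])
pred = np.array([12.0, 30.0, 100.0])
mse_log = np.mean((np.log1p(pred) - np.log1p(price)) ** 2)
rmsle = np.sqrt(mse_log)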

import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, Binarizer, OneHotEncoder
from sklearn.model_selection import train_test_split
from tensorflow.contrib.tensorboard.plugins import projector
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
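
The merge DataFrame used below is carried over from Part 1 and is not rebuilt in this post. A minimal sketch of what that loading step might look like, assuming tab-separated files with the columns used below; the file names are assumptions:

# Hypothetical loading step (from Part 1): tab-separated data with columns
# name, item_condition_id, category_name, brand_name, price, shipping,
# item_description
train = pd.read_csv('train.tsv', sep='\t')
test = pd.read_csv('test.tsv', sep='\t')
merge = pd.concat([train, test], axis=0).reset_index(drop=True)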

def onehot_condition(dataset):
    # one-hot encode item condition, dropping the first level (condition_1)
    onehot = pd.get_dummies(dataset.condition, drop_first=True, prefix='condition')
    dataset = pd.concat([dataset, onehot], axis=1).drop('condition', axis=1)
    return dataset

def handle_missing(dataset):
    dataset.category_name.fillna(value="missing", inplace=True)
    dataset.brand_name.fillna(value="missing", inplace=True)
    dataset.item_description.fillna(value="missing", inplace=True)
    dataset.loc[dataset.item_description == 'No description yet', 'item_description'] = 'missing'
    return dataset

def upper2lower(dataset):
    # lower-case every string column
    for (col, dtype) in dataset.dtypes.iteritems():
        if dtype == 'object':
            dataset[col] = dataset[col].str.lower()
    return dataset

def label_encoding(dataset):
    le_category = LabelEncoder()
    dataset['category'] = le_category.fit_transform(dataset.category_name)
    le_brand = LabelEncoder()
    dataset['brand'] = le_brand.fit_transform(dataset.brand_name)
    dataset['condition'] = dataset['item_condition_id']
    del dataset['brand_name'], dataset['item_condition_id']
    return dataset, le_category, le_brand

merge = handle_missing(merge)
merge = upper2lower(merge)
merge, le_category, le_brand = label_encoding(merge)
merge = onehot_condition(merge)
merge.head(3)

def batch_generator(dataset, batch_size):
    data = dataset[['brand', 'category', 'price', 'shipping',
                    'condition_2', 'condition_3', 'condition_4', 'condition_5']].values
    np.random.shuffle(data)
    datalen = data.shape[0]
    idx = 0
    while idx * batch_size < datalen:
        brand = data[idx*batch_size:(idx+1)*batch_size, 0].reshape(-1, 1)
        category = data[idx*batch_size:(idx+1)*batch_size, 1].reshape(-1, 1)
        # prices are log1p-transformed, so MSE on the output approximates RMSLE
        price = np.log1p(data[idx*batch_size:(idx+1)*batch_size, 2]).reshape(-1, 1)
        num = data[idx*batch_size:(idx+1)*batch_size, 3:]
        idx += 1
        yield brand, category, num, price
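
A quick way to sanity-check the generator's output shapes (the batch size of 512 is illustrative; num holds shipping plus the four condition dummies):

brand, category, num, price = next(batch_generator(merge, 512))
print(brand.shape, category.shape, num.shape, price.shape)
# -> (512, 1) (512, 1) (512, 5) (512, 1)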

class Model1(object):
    """Base model without text features."""
    def __init__(self, param):
        self.graph = tf.Graph()
        with self.graph.as_default():

            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

            with tf.name_scope('inputs'):
                self.add_input()

            with tf.name_scope('brand_embedding'):
                self.brand_embedding = self.add_brand_embedding(param.brand_num, param.brand_embed_dim)

            with tf.name_scope('category_embedding'):
                self.category_embedding = self.add_category_embedding(param.category_num, param.category_embed_dim)

            self.main = tf.concat([self.input_num,
                                   tf.reshape(self.brand_embedding, [-1, param.brand_embed_dim]),
                                   tf.reshape(self.category_embedding, [-1, param.category_embed_dim])], axis=1)

            with tf.name_scope('dense1'):
                self.dense1 = self.add_dense_layer(inputs=self.main,
                                                   input_size=param.category_embed_dim + param.brand_embed_dim + 5,
                                                   output_size=64,
                                                   activation=tf.nn.relu,
                                                   keep_prob=self.keep_prob)

            with tf.name_scope('dense2'):
                self.dense2 = self.add_dense_layer(inputs=self.dense1,
                                                   input_size=64,
                                                   output_size=32,
                                                   activation=tf.nn.relu,
                                                   keep_prob=self.keep_prob)

            with tf.name_scope('output_layer'):
                self.output = self.add_dense_layer(inputs=self.dense2,
                                                   input_size=32,
                                                   output_size=1,
                                                   activation=None,
                                                   keep_prob=False)

            with tf.name_scope('loss'):
                self.loss = tf.losses.mean_squared_error(self.target, self.output)

            with tf.name_scope('optimizer'):
                self.train = tf.train.AdamOptimizer(param.lr).minimize(self.loss)

            self.init = tf.global_variables_initializer()

    def add_input(self):
        self.input_brand = tf.placeholder(tf.int32, [None, 1], name='brand')
        self.input_category = tf.placeholder(tf.int32, [None, 1], name='category')
        self.input_num = tf.placeholder(tf.float32, [None, 5], name='num')
        self.target = tf.placeholder(tf.float32, [None, 1], name='price')

    def add_brand_embedding(self, input_dim, output_dim):
        self.brand_embed_matrix = tf.Variable(tf.random_uniform([input_dim, output_dim], -1., 1.0), name='brand_embed_matrix')
        # look up the brand ids in the brand embedding matrix
        embedding = tf.nn.embedding_lookup(self.brand_embed_matrix, self.input_brand, name='brand_embed_lookup')
        return embedding

    def add_category_embedding(self, input_dim, output_dim):
        self.category_embed_matrix = tf.Variable(tf.random_uniform([input_dim, output_dim], -1., 1.0), name='category_embed_matrix')
        embedding = tf.nn.embedding_lookup(self.category_embed_matrix, self.input_category, name='category_embed_lookup')
        return embedding

    def add_dense_layer(self, inputs, input_size, output_size, activation=None, keep_prob=False):
        W = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[input_size, output_size], mean=0., stddev=0.1), name='W')
        b = tf.Variable(tf.zeros(dtype=tf.float32, shape=[1, output_size]) + 0.1, name='b')
        output = tf.matmul(inputs, W) + b
        if activation:
            output = activation(output, name='output')
        if keep_prob is not False:
            output = tf.nn.dropout(output, keep_prob)
        return output

Train Model
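
The post never shows param, dtrain, or the val_* arrays that the training code below relies on. Here is a minimal sketch of what they could look like; every hyper-parameter value is a guess:

# Hypothetical hyper-parameter container; all values are illustrative.
class Param(object):
    brand_num = merge.brand.nunique()          # embedding vocabulary sizes
    category_num = merge.category.nunique()
    brand_embed_dim = 32
    category_embed_dim = 16
    lr = 0.001
    keep_prob = 0.5
    epochs = 5
    batchsize = 512
    logdir = './logs'

param = Param()

# hold out 10% of the rows for validation
dtrain, dval = train_test_split(merge, test_size=0.1, random_state=42)
val_brand = dval['brand'].values.reshape(-1, 1)
val_category = dval['category'].values.reshape(-1, 1)
val_num = dval[['shipping', 'condition_2', 'condition_3', 'condition_4', 'condition_5']].values
val_price = np.log1p(dval['price'].values).reshape(-1, 1)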

model1 = Model1(param)
# summary ops must be created inside the model's graph
with model1.graph.as_default():
    tf.summary.histogram(name='brand_embedding', values=model1.brand_embed_matrix)
    tf.summary.histogram(name='category_embedding', values=model1.category_embed_matrix)
    tf.summary.scalar('loss', model1.loss)
    summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(param.logdir, model1.graph)
# val_writer = tf.summary.FileWriter(param.logdir + '/val', model1.graph)
sess = tf.Session(graph=model1.graph)
sess.run(model1.init)

for i in range(param.epochs):
    k = 0
    for (brand, category, num, price) in batch_generator(dtrain, param.batchsize):
        _, loss = sess.run([model1.train, model1.loss],
                           {model1.input_brand: brand,
                            model1.input_category: category,
                            model1.input_num: num,
                            model1.target: price,
                            model1.keep_prob: param.keep_prob})
        if k % 20 == 0:
            val_loss = sess.run(model1.loss, {model1.input_brand: val_brand,
                                              model1.input_category: val_category,
                                              model1.input_num: val_num,
                                              model1.target: val_price,
                                              model1.keep_prob: 1.})
            train_loss = sess.run(model1.loss, {model1.input_brand: brand,
                                                model1.input_category: category,
                                                model1.input_num: num,
                                                model1.target: price,
                                                model1.keep_prob: 1.})
            print('epoch:%d step:%d : train_loss:%.4f, val_loss:%.4f, embed_sum:%.2f'
                  % (i, k, train_loss, val_loss, sess.run(model1.brand_embed_matrix).sum()))
        k += 1
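
Note that summary and writer are created above but never run inside the loop, so the event file only contains the graph. One possible addition inside the inner loop (a sketch; using the step counter k as the global step is illustrative):

# inside the `if k % 20 == 0:` block, after computing val_loss:
s = sess.run(summary, {model1.input_brand: brand,
                       model1.input_category: category,
                       model1.input_num: num,
                       model1.target: price,
                       model1.keep_prob: 1.})
writer.add_summary(s, k)

The dashboards can then be inspected with tensorboard --logdir ./logs (matching param.logdir in the sketch above).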

Follow-up optimizations will be posted as soon as possible. I haven't used TensorFlow much, and I feel something is wrong with this model... it doesn't even match xgboost's performance on the same inputs.
