TensorFlow simple code
親測可用,從網路上看到的帖子,忘記出處了(對作者表示歉意!),這個是用tf的low api實現的,十幾行代碼,比較好理解。
"""Fit y = 0.2*x + 0.6 by gradient descent using low-level TF1 ops."""
import tensorflow as tf
import numpy as np

# Synthetic training data: 100 points on the line y = 0.2x + 0.6.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.2 + 0.6

# Trainable parameters: weight starts uniform in [-1, 1), bias at 0.
weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
bias = tf.Variable(tf.zeros([1]))  # was misspelled "baise" in the original

# Linear model and mean-squared-error loss.
y = weight * x_data + bias
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# tf.initialize_all_variables() is deprecated (removed after TF 2017-03-02);
# global_variables_initializer() is the supported replacement.
init = tf.global_variables_initializer()

# Context manager guarantees the session is closed even if an op raises,
# replacing the bare Session()/close() pair.
with tf.Session() as sess:
    sess.run(init)
    for step in range(400):
        sess.run(train)
        if step % 40 == 0:
            # weight should converge toward 0.2 and bias toward 0.6.
            print(step, sess.run(weight), sess.run(bias))
改寫tensorflow的官方start中分花的代碼,把官方文檔的多個文件改成了50行以內的代碼,親測可用,前提是下載好兩個csv文件在當前目錄下,感覺難點是如何把數據裝到pandas中,很多api不夠熟悉,而且python還不夠精通。
"""Iris DNNClassifier condensed from the official TF get-started guide.

Expects iris_training.csv and iris_test.csv in the current directory.
"""
import tensorflow as tf
import pandas as pd

# Column names for the iris CSVs; the last column is the integer label.
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
                    'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']

tf.logging.set_verbosity(tf.logging.INFO)


def fuc_data_set(features, labels):
    """Build a tf.data.Dataset input pipeline for an Estimator.

    features: dict-like of column-name -> values (a DataFrame works).
    labels:   label Series/array, or None for a prediction-only dataset.

    Returns a batched Dataset; training/eval data is additionally
    shuffled and repeated, prediction data is not.
    """
    if labels is None:
        # Prediction: features only, no shuffle/repeat.
        dataset = tf.data.Dataset.from_tensor_slices(dict(features))
        return dataset.batch(100)
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat().batch(100)


# Load the CSVs; pop() removes the label column and returns it as a Series.
train_data_DataFrame = pd.read_csv('iris_training.csv',
                                   names=CSV_COLUMN_NAMES, header=0)
train_label_Series = train_data_DataFrame.pop('Species')
test_data_DataFrame = pd.read_csv('iris_test.csv',
                                  names=CSV_COLUMN_NAMES, header=0)
test_label_Series = test_data_DataFrame.pop('Species')

# One numeric feature column per remaining (non-label) CSV column.
featch_columns = [tf.feature_column.numeric_column(key=key)
                  for key in train_data_DataFrame.keys()]

# Two hidden layers (20 and 15 units), 3 output classes.
classifier = tf.estimator.DNNClassifier(hidden_units=[20, 15],
                                        feature_columns=featch_columns,
                                        n_classes=3)

# Train and evaluate the model.
classifier.train(
    input_fn=lambda: fuc_data_set(train_data_DataFrame, train_label_Series),
    steps=1000)
eval_result = classifier.evaluate(
    input_fn=lambda: fuc_data_set(test_data_DataFrame, test_label_Series),
    steps=1000)
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

# Predict on four hand-written samples (keys must match the feature columns).
predict_x = {
    'SepalLength': [5.1, 5.9, 6.9, 3],
    'SepalWidth': [3.3, 3.0, 3.1, 3],
    'PetalLength': [1.7, 4.2, 5.4, 2],
    'PetalWidth': [0.5, 1.5, 2.1, 1],
}
# NOTE: the original also built an unused Dataset from predict_x here;
# fuc_data_set(..., labels=None) already does that, so it was removed.
predictions = classifier.predict(
    input_fn=lambda: fuc_data_set(predict_x, labels=None))

for pred_dict in predictions:
    template = 'Prediction is {:.1f}%'
    # Print the probability of each of the 3 classes for this sample.
    for i in range(3):
        probability = pred_dict['probabilities'][i]
        print(template.format(100 * probability))
    print()
推薦閱讀:
※TensorFlow煉丹(1) Using GPUs
※Docker--深度學習環境配置一站式解決方案
※學習筆記TF012:卷積網路簡述
※tensorflow 為何方聖物??
TAG:TensorFlow |