基於行為模仿的自動駕駛

本篇內容由科賽用戶fiona貢獻:基於《Udacity Self-Driving Car Engineer Nanodegree projects》改編自動駕駛項目,於 K-Lab 上用新版本的 keras 進行的實現。

本文的圖片的來源是通過模擬駕駛遊戲的截圖獲得。

登錄科賽 Kesci,啟動 K-Lab 在線數據分析工具,你可一鍵 fork 本項目,展開項目分析。

# 列印 python 版本

# Print the Python interpreter version (the source had both statements
# collapsed onto one line: "import sysprint(sys.version)").
import sys

print(sys.version)

# 檢查 GPU 驅動

# List CUDA installations under /usr/local to confirm the GPU driver stack.
!ls /usr/local/ | grep cuda

# 安裝 opencv-python

!pip3 install -i https://pypi.douban.com/simple opencv-python# 安裝新版本 keras!pip3 install -U -i https://pypi.douban.com/simple keras

# 解壓文件

!tar zxf /home/kesci/input/car_img3807/car_img.gz -C .#查看文件!ls ./IMG_CAR# touch 日誌文件!touch ./history.csv

# 查看數據格式

import pandas as pdimport numpy as npimport matplotlib.pyplot as pltimport cv2% matplotlib inlinedf = pd.read_csv(./IMG_CAR/driving_log.csv)df.columns = [ct_path,lt_path,rt_path,steer,throttle,brake,speed]df.head()# 分別是中間鏡頭,左鏡頭,右鏡頭,以及角度,油門,剎車,和速度。

# 先看一下圖像

# Show the left / centre / right camera frames for a few hand-picked
# samples, with the recorded steering angle above the centre frame.
i = 1
limit = 30
plt.figure(figsize=(12, 16))
for index, row in df.iterrows():
    if i > limit:
        break
    ct_path, lt_path, rt_path, steering = row["ct_path"], row["lt_path"], row["rt_path"], row["steer"]
    # Only visualise four specific centre frames from the recording.
    if ct_path in ("IMG_CAR/IMG/center_2018_02_01_09_48_06_846.jpg",
                   "IMG_CAR/IMG/center_2018_02_01_09_48_51_188.jpg",
                   "IMG_CAR/IMG/center_2018_02_01_09_48_58_162.jpg",
                   "IMG_CAR/IMG/center_2018_02_01_09_48_47_181.jpg"):
        # Left camera frame.
        plt.subplot(4, 3, i)
        i = i + 1
        plt.gca().set_axis_off()
        left_frame = cv2.imread(lt_path)
        plt.imshow(cv2.cvtColor(left_frame, code=cv2.COLOR_BGR2RGB))

        # Centre camera frame, titled with the steering angle.
        plt.subplot(4, 3, i)
        i = i + 1
        plt.title("Steering: {:03f}".format(steering))
        plt.gca().set_axis_off()
        central_frame = cv2.imread(ct_path)
        plt.imshow(cv2.cvtColor(central_frame, code=cv2.COLOR_BGR2RGB))

        # Right camera frame.
        plt.subplot(4, 3, i)
        i = i + 1
        plt.gca().set_axis_off()
        right_frame = cv2.imread(rt_path)
        plt.imshow(cv2.cvtColor(right_frame, code=cv2.COLOR_BGR2RGB))
# The last row is actually straight driving with a small human error.

# 再來看一下數據分布

# Plot the steering-angle distribution. Most samples are straight driving,
# so a naively trained model would mostly learn to drive straight.
count = df.groupby(["steer"])["ct_path"].count()
count.plot(kind="bar", color="green", figsize=(10, 8))

# 定義預處理函數

def preprocess(frame_bgr, verbose=False):
    """Crop, resize and grey-scale one BGR frame for the network.

    Args:
        frame_bgr: BGR image as returned by ``cv2.imread``
            (assumed at least 140 px tall -- TODO confirm simulator size).
        verbose: when True, also plot every intermediate stage.

    Returns:
        ``np.float32`` array of shape ``(66, 200, 1)``.
    """
    # Target size matches the NVIDIA end-to-end network input (66 x 200).
    h, w = 66, 200
    # Keep rows 20..139: drop the sky at the top and the hood at the bottom.
    crop_height = range(20, 140)
    frame_cropped = frame_bgr[crop_height, :, :]
    frame_resized = cv2.resize(frame_cropped, dsize=(w, h))
    frame_gray = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
    if verbose:
        plt.figure(1), plt.gca().set_axis_off(), plt.imshow(cv2.cvtColor(frame_bgr, code=cv2.COLOR_BGR2RGB))
        plt.figure(2), plt.gca().set_axis_off(), plt.imshow(cv2.cvtColor(frame_cropped, code=cv2.COLOR_BGR2RGB))
        plt.figure(3), plt.gca().set_axis_off(), plt.imshow(cv2.cvtColor(frame_resized, code=cv2.COLOR_BGR2RGB))
        plt.figure(4), plt.gca().set_axis_off(), plt.imshow(frame_gray, cmap="gray")
        plt.show()
    # Grey-scaling drops the channel axis; add it back and cast to float32.
    return np.expand_dims(frame_gray, axis=2).astype(np.float32)


img = preprocess(cv2.imread("IMG_CAR/IMG/center_2018_02_01_09_48_47_181.jpg"),
                 verbose=True)
# Print the single-channel image shape.
print(img.shape)

from sklearn.model_selection import train_test_split

from os.path import join
import csv
import random


def load_data(data, bias=0.5):
    """Load frames/labels from driving-log rows, rebalancing with *bias*.

    Args:
        data: iterable of 7-tuples
            ``(ct_path, lt_path, rt_path, steer, throttle, brake, speed)``.
        bias: in [0, 1]; higher values keep more near-straight samples,
            lower values aggressively discard them.

    Returns:
        ``(x, y_steer)`` -- preprocessed frames ``(N, 66, 200, 1)`` and
        steering angles ``(N,)``, both ``np.float32``. Trailing slots stay
        zero-filled for rows that were discarded.
    """
    # Grey-scale frames.
    x = np.zeros(shape=(len(data), 66, 200, 1), dtype=np.float32)
    # Corresponding steering angles.
    y_steer = np.zeros(shape=(len(data),), dtype=np.float32)
    loaded_elements = 0
    for (ct_path, lt_path, rt_path, steer, throttle, brake, speed) in data:
        steer = np.float32(steer)
        # Humans make small driving errors too; offset the side cameras a
        # little to compensate.
        delta_correction = 0.05
        camera = random.choice(["frontal", "left", "right"])
        if camera == "frontal":
            frame = preprocess(cv2.imread(ct_path.strip()))
            steer = steer
        elif camera == "left":
            frame = preprocess(cv2.imread(lt_path.strip()))
            steer = steer + delta_correction
        elif camera == "right":
            frame = preprocess(cv2.imread(rt_path.strip()))
            steer = steer - delta_correction
        # Jitter the label with Gaussian noise (mean 0, std 0.2).
        steer += np.random.normal(loc=0, scale=0.2)
        # Randomly discard near-straight samples to flatten the distribution.
        steer_magnitude_thresh = np.random.rand()
        if (abs(steer) + bias) < steer_magnitude_thresh:
            pass  # discard this sample
        else:
            x[loaded_elements] = frame
            y_steer[loaded_elements] = steer
            loaded_elements += 1
    return x, y_steer


def visualize_bias_parameter_effect(train_data):
    """Histogram the steering distribution for several *bias* values."""
    biases = np.linspace(start=0., stop=1., num=5)
    fig, axarray = plt.subplots(len(biases))
    plt.suptitle("bias 對方向角度分布的影響", fontsize=14, fontweight="bold")
    for i, ax in enumerate(axarray.ravel()):
        b = biases[i]
        x_batch, y_batch = load_data(train_data, bias=b)
        # NOTE(review): ``normed`` was removed in matplotlib 3.x -- newer
        # versions need ``density=1`` instead; confirm the installed version.
        ax.hist(y_batch, 50, normed=1, facecolor="green", alpha=0.75)
        ax.set_title("Bias: {:02f}".format(b))
        ax.axis([-1., 1., 0., 2.])
    plt.tight_layout(pad=2, w_pad=0.5, h_pad=1.0)
    plt.show()


def split_train_val(csv_driving_data, test_size=0.2):
    """Read the driving log CSV (skipping the header row) and split it
    into train/validation row lists with a fixed random seed."""
    with open(csv_driving_data, "r") as f:
        reader = csv.reader(f)
        driving_data = [row for row in reader][1:]
    train_data, val_data = train_test_split(driving_data,
                                            test_size=test_size,
                                            random_state=1)
    return train_data, val_data


train_data, val_data = split_train_val("./IMG_CAR/driving_log.csv")
visualize_bias_parameter_effect(train_data)
# A bias of 1 keeps the steering-angle distribution reasonably uniform.

網路的設計是基於 NVIDIA 的一篇論文(End to End Learning for Self-Driving Cars)的結構做的。

from keras.models import Sequential

from keras.layers import Input, Conv2D, Flatten, Dense, Dropout, Lambda


def get_model(summary=True):
    """Build the steering-regression CNN following the NVIDIA end-to-end
    architecture; each conv layer uses a fairly small kernel size.

    Args:
        summary: when True, print the layer-by-layer model summary.

    Returns:
        An uncompiled ``keras.models.Sequential`` model taking
        ``(66, 200, 1)`` frames and emitting one steering value.
    """
    model = Sequential()
    # Normalise pixel values from [0, 255] to [-1, 1].
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(66, 200, 1)))
    model.add(Conv2D(24, (5, 5), padding="valid", activation="elu"))
    model.add(Dropout(0.2))
    model.add(Conv2D(36, (5, 5), padding="valid", strides=(2, 2), activation="elu"))
    model.add(Dropout(0.2))
    model.add(Conv2D(48, (5, 5), padding="valid", strides=(2, 2), activation="elu"))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding="valid", activation="elu"))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), padding="valid", activation="elu"))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(100, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(50, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation="elu"))
    # Single linear output: the predicted steering angle.
    model.add(Dense(1))
    if summary:
        model.summary()
    return model


# Keras makes prototyping a network architecture like this very quick.
net = get_model()

from keras.callbacks import ModelCheckpoint, CSVLogger

net.compile(optimizer="adam", loss="mse")
# Save a checkpoint after every epoch, tagged with epoch and val loss,
# and log the training history to CSV.
checkpointer = ModelCheckpoint("checkpoints_weights.{epoch:02d}-{val_loss:.3f}.hdf5")
logger = CSVLogger(filename="history.csv")
train_data, val_data = split_train_val(csv_driving_data="./IMG_CAR/driving_log.csv")
x, y = load_data(train_data)
# Train the network.
net.fit(x, y, validation_data=load_data(val_data), epochs=10,
        batch_size=32, callbacks=[checkpointer, logger])

# 看一下訓練結果

# Sanity-check the trained model on a single frame.
img_bgr = cv2.imread("IMG_CAR/IMG/center_2018_02_01_09_47_57_570.jpg")
# Add a batch axis: the network expects (1, 66, 200, 1).
img = np.expand_dims(preprocess(img_bgr), axis=0)
steering_angle = net.predict(img, batch_size=1)
plt.title("Steering {0:.3f}".format(steering_angle[0][0]))
plt.gca().set_axis_off(), plt.imshow(cv2.cvtColor(img_bgr, code=cv2.COLOR_BGR2RGB))
plt.show()
# Reasonable prediction: the model knows it should steer left here.

TO DO

接入遊戲的 API,在模擬駕駛中進行模擬,如下視頻。

%%HTML<iframe width="560" height="315" src="https://www.youtube.com/embed/gXkMELjZmCc?rel=0&amp;showinfo=0&amp;start=120" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>

相關鏈接:Udacity Self-Driving Car Engineer Nanodegree projects


推薦閱讀:

AirSim 自動駕駛教程--訓練數據
自動駕駛汽車的下個階段:你可以在車上打個盹
Mobileye 的成功之道是什麼? Shashua 給出了這三點思考

TAG:自動駕駛 | 人工智慧 |