Machine Learning: Learning Reinforcement Learning Step by Step, Example 2
Example 2 is a 16-node version of Example 1. The code is similar, with a few improvements.
===============================================================
Java code:
import java.util.Random;

public class QLearning2
{
    private static final int Q_SIZE = 16;
    private static final double GAMMA = 0.8;
    private static final int ITERATIONS = 10;
    private static final int NUM_INITIALS = 6;
    private static final int GOAL_STATE = 15;
    private static final int[] INITIAL_STATES = {1, 3, 5, 2, 4, 0};

    // Reward matrix: R[s][a] = -1 means state a is not reachable from state s,
    // 0 means a valid move, 100 means a move into the goal state.
    private static final int[][] R =
        {{-1,  0, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
         { 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
         {-1, -1, -1,  0, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1, -1, -1},
         {-1, -1,  0, -1, -1, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1, -1},
         { 0, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1},
         {-1, -1, -1, -1, -1, -1,  0, -1, -1,  0, -1, -1, -1, -1, -1, -1},
         {-1, -1,  0, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
         {-1, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1},
         {-1, -1, -1, -1,  0, -1, -1, -1, -1,  0, -1, -1,  0, -1, -1, -1},
         {-1, -1, -1, -1, -1,  0, -1, -1,  0, -1,  0, -1, -1,  0, -1, -1},
         {-1, -1, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1,  0, -1},
         {-1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1, 100},
         {-1, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1, -1, -1, -1},
         {-1, -1, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, -1,  0, -1},
         {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1,  0, -1, -1},
         {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  0, -1, -1, -1, 100}};

    private static int[][] q = new int[Q_SIZE][Q_SIZE];
    private static int currentState = 0;
    private static final Random RANDOM = new Random();

    private static void train()
    {
        initialize();

        // Perform training, starting at all initial states.
        for(int j = 0; j < ITERATIONS; j++)
        {
            for(int i = 0; i < NUM_INITIALS; i++)
            {
                episode(INITIAL_STATES[i]);
            }
        }

        // Uncomment to print the learned Q matrix.
//        System.out.println("Q Matrix values:");
//        for(int i = 0; i < Q_SIZE; i++)
//        {
//            for(int j = 0; j < Q_SIZE; j++)
//            {
//                System.out.print(q[i][j] + ",\t");
//            }
//            System.out.print("\n");
//        }
//        System.out.print("\n");
    }

    private static void test()
    {
        int newState;

        // Perform tests, starting at all initial states.
        System.out.println("Shortest routes from initial states:");
        for(int i = 0; i < NUM_INITIALS; i++)
        {
            currentState = INITIAL_STATES[i];
            do
            {
                newState = maximum(currentState, true);
                System.out.print(currentState + ", ");
                currentState = newState;
            }while(currentState != GOAL_STATE); // Loop until currentState = GOAL_STATE.
            System.out.print(GOAL_STATE + "\n");
        }
    }

    private static void episode(final int initialState)
    {
        currentState = initialState;

        // Travel from state to state until the goal state is reached.
        do
        {
            chooseAnAction();
        }while(currentState != GOAL_STATE); // Loop until currentState = GOAL_STATE.

        // When currentState = GOAL_STATE, run through the set once more for convergence.
        for(int i = 0; i < Q_SIZE; i++)
        {
            chooseAnAction();
        }
    }

    private static void chooseAnAction()
    {
        // Randomly choose a possible action connected to the current state.
        int possibleAction = getRandomAction(Q_SIZE);

        if(R[currentState][possibleAction] >= 0){
            q[currentState][possibleAction] = reward(possibleAction);
            currentState = possibleAction;
        }
    }

    private static int getRandomAction(final int upperBound)
    {
        int action = 0;
        boolean choiceIsValid = false;

        // Randomly choose a possible action connected to the current state.
        while(!choiceIsValid)
        {
            // Get a random value between 0 (inclusive) and upperBound (exclusive).
            action = RANDOM.nextInt(upperBound);
            if(R[currentState][action] > -1){
                choiceIsValid = true;
            }
        }
        return action;
    }

    private static void initialize()
    {
        for(int i = 0; i < Q_SIZE; i++)
        {
            for(int j = 0; j < Q_SIZE; j++)
            {
                q[i][j] = 0;
            }
        }
    }

    private static int maximum(final int state, final boolean returnIndexOnly)
    {
        // If returnIndexOnly is true, the index of the best action is returned;
        // otherwise the best Q value itself is returned.
        int winner = 0;
        for(int i = 1; i < Q_SIZE; i++)
        {
            if(q[state][i] > q[state][winner]){
                winner = i;
            }
        }
        return returnIndexOnly ? winner : q[state][winner];
    }

    private static int reward(final int action)
    {
        // Q(s, a) = R(s, a) + GAMMA * max over a' of Q(s', a'),
        // where s' is the state reached by taking action a.
        return (int)(R[currentState][action] + (GAMMA * maximum(action, false)));
    }

    public static void main(String[] args)
    {
        train();
        test();
    }
}
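For reference, the update applied in reward() and chooseAnAction() above is the one-step deterministic Q-learning rule (with an implicit learning rate of 1):

    Q(s, a) = R(s, a) + GAMMA * max over a' of Q(s', a')

where s' is the state reached by taking action a from state s. Compiling and running the class in the usual way (javac QLearning2.java, then java QLearning2) should print one route per initial state, each ending at the goal state 15; the exact routes can vary slightly because exploration during training is random.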
Reference:
1. Q-Learning Example 2