{"id":644,"date":"2024-11-01T03:46:28","date_gmt":"2024-11-01T03:46:28","guid":{"rendered":"https:\/\/119.91.218.15\/?p=644"},"modified":"2024-11-01T03:49:07","modified_gmt":"2024-11-01T03:49:07","slug":"%e5%bc%ba%e5%8c%96%e5%ad%a6%e4%b9%a0%e5%85%a5%e9%97%a8-q%e5%ad%a6%e4%b9%a0","status":"publish","type":"post","link":"https:\/\/blog.liguanxin.cn\/index.php\/2024\/11\/01\/%e5%bc%ba%e5%8c%96%e5%ad%a6%e4%b9%a0%e5%85%a5%e9%97%a8-q%e5%ad%a6%e4%b9%a0\/","title":{"rendered":"\u5f3a\u5316\u5b66\u4e60\u5165\u95e8-Q\u5b66\u4e60"},"content":{"rendered":"<p>\u5f3a\u5316\u5b66\u4e60\uff08Reinforcement Learning, RL\uff09\u662f\u4e00\u79cd\u673a\u5668\u5b66\u4e60\u65b9\u6cd5\uff0c\u901a\u8fc7\u4e0e\u73af\u5883\u7684\u4ea4\u4e92\u6765\u5b66\u4e60\u5982\u4f55\u91c7\u53d6\u884c\u52a8\uff0c\u4ee5\u6700\u5927\u5316\u7d2f\u79ef\u7684\u5956\u52b1\u3002\u4ee5\u4e0b\u662f\u5bf9\u5f3a\u5316\u5b66\u4e60\u539f\u7406\u3001\u4f5c\u7528\u4ee5\u53ca\u5982\u4f55\u6784\u5efa\u4e00\u4e2a\u5f3a\u5316\u5b66\u4e60\u6a21\u578b\u7684\u8be6\u7ec6\u89e3\u91ca\uff1a<\/p>\n<h1>\u539f\u7406<\/h1>\n<p><strong>\u4ee3\u7406\uff08Agent\uff09<\/strong>\uff1a\u505a\u51fa\u51b3\u7b56\u7684\u4e3b\u4f53\u3002<br \/>\n<strong>\u73af\u5883\uff08Environment\uff09<\/strong>\uff1a\u4ee3\u7406\u4e0e\u4e4b\u4ea4\u4e92\u7684\u5916\u90e8\u7cfb\u7edf\u3002<br \/>\n<strong>\u72b6\u6001\uff08State, s\uff09<\/strong>\uff1a\u73af\u5883\u5728\u67d0\u4e00\u65f6\u523b\u7684\u5177\u4f53\u60c5\u51b5\u3002<br \/>\n<strong>\u52a8\u4f5c\uff08Action, a\uff09<\/strong>\uff1a\u4ee3\u7406\u5728\u67d0\u4e00\u72b6\u6001\u4e0b\u53ef\u4ee5\u91c7\u53d6\u7684\u884c\u4e3a\u3002<br \/>\n<strong>\u5956\u52b1\uff08Reward, r\uff09<\/strong>\uff1a\u4ee3\u7406\u91c7\u53d6\u67d0\u4e00\u52a8\u4f5c\u540e\u73af\u5883\u53cd\u9988\u7684\u4fe1\u53f7\uff0c\u7528\u4e8e\u8861\u91cf\u52a8\u4f5c\u7684\u597d\u574f\u3002<br \/>\n<strong>\u7b56\u7565\uff08Policy, \u03c0\uff09<\/strong>\uff1a\u4ee3\u7406\u9009\u62e9\u52a8\u4f5c\u7684\u89c4\u5219\u6216\u51fd\u6570\uff0c\u53ef\u4ee5\u662f\u786e\u5b9a\u6027\u7684\u6216\u968f\u673a\u7684\u3002<br \/>\n<strong>\u503c\u51fd\u6570\uff08Value Function, V\uff09<\/strong>\uff1a\u8bc4\u4f30\u67d0\u4e00\u72b6\u6001\u7684\u597d\u574f\uff0c\u8868\u793a\u5728\u8be5\u72b6\u6001\u4e0b\u80fd\u83b7\u5f97\u7684\u671f\u671b\u7d2f\u79ef\u5956\u52b1\u3002<br \/>\n<strong>Q\u51fd\u6570\uff08Q-Value, Q\uff09<\/strong>\uff1a\u8bc4\u4f30\u5728\u67d0\u4e00\u72b6\u6001\u91c7\u53d6\u67d0\u4e00\u52a8\u4f5c\u7684\u597d\u574f\uff0c\u8868\u793a\u91c7\u53d6\u8be5\u52a8\u4f5c\u540e\u80fd\u83b7\u5f97\u7684\u671f\u671b\u7d2f\u79ef\u5956\u52b1\u3002<\/p>\n<h1>Q-learning\u7b97\u6cd5<\/h1>\n<pre><code class=\"language-python\">import numpy as np\n\nclass GridWorld:\n    def __init__(self, size=5):\n        self.size = size\n        self.state = 0  # \u8d77\u70b9\u5728\u5de6\u4e0a\u89d2\n        self.end_state = size * size - 1  # \u7ec8\u70b9\u5728\u53f3\u4e0b\u89d2\n\n        # \u52a8\u4f5c\u7a7a\u95f4\uff1a\u4e0a(0)\u3001\u53f3(1)\u3001\u4e0b(2)\u3001\u5de6(3)\n        self.action_space = [0, 1, 2, 3]\n        print(f&quot;\u521b\u5efa\u4e86\u4e00\u4e2a {size}x{size} \u7684\u7f51\u683c\u4e16\u754c&quot;)\n        print(f&quot;\u8d77\u70b9\u4f4d\u7f6e: (0,0), \u7ec8\u70b9\u4f4d\u7f6e: ({size-1},{size-1})&quot;)\n\n    def get_state_coords(self, state):\n        &quot;&quot;&quot;\u5c06\u72b6\u6001\u6570\u5b57\u8f6c\u6362\u4e3a\u5750\u6807&quot;&quot;&quot;\n        return state \/\/ self.size, state % self.size\n\n    def reset(self):\n        self.state = 0\n        x, y = 
# The Q-learning Algorithm

```python
import numpy as np

class GridWorld:
    def __init__(self, size=5):
        self.size = size
        self.state = 0  # start in the top-left corner
        self.end_state = size * size - 1  # goal in the bottom-right corner

        # action space: up (0), right (1), down (2), left (3)
        self.action_space = [0, 1, 2, 3]
        print(f"Created a {size}x{size} grid world")
        print(f"Start: (0,0), goal: ({size-1},{size-1})")

    def get_state_coords(self, state):
        """Convert a state index into (row, col) coordinates."""
        return state // self.size, state % self.size

    def reset(self):
        self.state = 0
        x, y = self.get_state_coords(self.state)
        print(f"\nEnvironment reset, agent position: ({x},{y})")
        return self.state

    def step(self, action):
        old_x, old_y = self.get_state_coords(self.state)
        x, y = old_x, old_y

        # update the position according to the action
        if action == 0:    # up
            x = max(0, x - 1)
        elif action == 1:  # right
            y = min(self.size - 1, y + 1)
        elif action == 2:  # down
            x = min(self.size - 1, x + 1)
        elif action == 3:  # left
            y = max(0, y - 1)

        self.state = x * self.size + y

        # reward is 1.0 at the goal, -0.1 for every other step
        reward = 1.0 if self.state == self.end_state else -0.1
        done = self.state == self.end_state

        action_names = ['up', 'right', 'down', 'left']
        print(f"Action: {action_names[action]}, moved from ({old_x},{old_y}) to ({x},{y}), reward: {reward:.1f}")

        return self.state, reward, done

class QLearning:
    def __init__(self, state_size, action_size, learning_rate=0.1, gamma=0.9):
        self.q_table = np.zeros((state_size, action_size))
        self.lr = learning_rate  # learning rate
        self.gamma = gamma       # discount factor
        print("\nInitialized Q-learning agent:")
        print(f"Learning rate: {learning_rate}")
        print(f"Discount factor: {gamma}")
        print(f"Q-table size: {state_size}x{action_size}")

    def get_action(self, state, epsilon=0.1):
        # epsilon-greedy policy
        if np.random.random() < epsilon:
            action = np.random.choice(len(self.q_table[state]))
            print(f"Explore: random action {action}")
            return action
        action = np.argmax(self.q_table[state])
        print(f"Exploit: best action {action}")
        return action

    def learn(self, state, action, reward, next_state):
        # 1. current Q-value of this state-action pair
        old_value = self.q_table[state, action]

        # 2. largest Q-value in the next state
        next_max = np.max(self.q_table[next_state])

        # 3. Q-learning update rule
        new_value = (1 - self.lr) * old_value + self.lr * (reward + self.gamma * next_max)

        # 4. write the new value back into the Q-table
        self.q_table[state, action] = new_value

def print_episode_summary(episode, total_reward, steps):
    print(f"\nEpisode {episode} summary:")
    print(f"Total steps: {steps}")
    print(f"Total reward: {total_reward:.2f}")
    print("-" * 50)

# training loop
env = GridWorld(size=5)
agent = QLearning(state_size=25, action_size=4)
episodes = 100  # kept small for the demo

for episode in range(episodes):
    state = env.reset()
    total_reward = 0
    done = False
    steps = 0

    print(f"\nStarting episode {episode + 1}")

    while not done:
        steps += 1
        action = agent.get_action(state, epsilon=0.1)
        next_state, reward, done = env.step(action)
        agent.learn(state, action, reward, next_state)
        state = next_state
        total_reward += reward

        if steps > 100:  # guard against endless wandering
            print("Too many steps, ending the episode early")
            break

    print_episode_summary(episode + 1, total_reward, steps)

    # show the Q-table every 10 episodes
    if (episode + 1) % 10 == 0:
        print("\nQ-table snapshot:")
        print(agent.q_table)  # the full 25x4 Q-table
```
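Once training finishes, the learned behavior can be checked by following the greedy policy (always the argmax action, with no exploration) from the start cell. This short rollout is an illustration added here, not part of the original script; it reuses the `env` and `agent` objects defined above:

```python
# Roll out the greedy policy learned above (assumes `env`, `agent`,
# and `np` from the training script are still in scope).
state = env.reset()
done, steps = False, 0
while not done and steps < 25:  # cap the rollout in case training was too short
    action = int(np.argmax(agent.q_table[state]))  # pure exploitation
    state, reward, done = env.step(action)
    steps += 1
print(f"Reached the goal: {done}, in {steps} steps")
```

On a 5x5 grid the shortest path needs 8 moves, so a well-trained agent should finish this rollout in exactly 8 steps.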
### Code Analysis

**1. The `GridWorld` class**:

- Creates a simple grid environment
- The agent can move up, down, left, and right
- Reaching the goal yields a positive reward; every other step yields a small negative reward

**2. The `QLearning` class**:

- Implements the Q-learning algorithm
- Maintains a Q-table that stores state-action values
- Selects actions with an ε-greedy policy (a decaying-ε variant is sketched after this list)
- Updates Q-values via temporal-difference learning

**3. The training loop**:

- Runs multiple episodes to train the agent
- Each episode starts from the start cell and continues until the goal is reached
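The script keeps the exploration rate fixed at ε = 0.1. A common refinement, not used in the original code, is to decay ε across episodes so the agent explores heavily at first and exploits its knowledge later. A minimal sketch, with purely illustrative constants:

```python
# Hypothetical schedule: exponential decay with a floor. The constants
# below are illustrative, not taken from the original post.
epsilon_start, epsilon_min, decay = 1.0, 0.05, 0.97

def epsilon_for(episode):
    """Exploration rate for a given episode, decayed toward a floor."""
    return max(epsilon_min, epsilon_start * decay ** episode)

# Inside the training loop you would then call:
#   action = agent.get_action(state, epsilon=epsilon_for(episode))
print([round(epsilon_for(e), 3) for e in (0, 10, 50, 99)])  # [1.0, 0.737, 0.218, 0.05]
```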
Output:

```log
Q-table snapshot:
[[-0.29766024 -0.27952562 -0.24557464 -0.28062151]
 [-0.2124381  -0.21151503 -0.1930229  -0.21528052]
 [-0.15677518 -0.15030181 -0.10340539 -0.17394976]
 [-0.10466175 -0.10065021 -0.09897863 -0.10862233]
 [-0.07430568 -0.06793465 -0.05423492 -0.06129447]
 [-0.22295002 -0.11124957 -0.21446148 -0.22789866]
 [-0.19177152  0.06200571 -0.15535018 -0.19476545]
 [-0.12503019 -0.10603329  0.25057189 -0.12785624]
 [-0.08993588 -0.05934414  0.07503771 -0.06348727]
 [-0.03940399 -0.03940399  0.22452878 -0.03948338]
 [-0.17292671 -0.15084247 -0.15448072 -0.15648468]
 [-0.12302759  0.02442123 -0.10888807 -0.12450674]
 [-0.07707295  0.4344819  -0.05944325 -0.0677957 ]
 [-0.03414978  0.61352714  0.01632828 -0.03240244]
 [-0.01567082  0.05200123  0.79880578  0.03590251]
 [-0.11063855 -0.1078826  -0.11357292 -0.11286845]
 [-0.07876345 -0.06386965 -0.06959083 -0.07230351]
 [-0.0306487   0.11765511 -0.034561   -0.0306487 ]
 [-0.01        0.60844074 -0.01       -0.01171   ]
 [ 0.09009701  0.25152601  0.99991536  0.10631137]
 [-0.08317677 -0.08653641 -0.08582936 -0.08564389]
 [-0.04145702 -0.01794207 -0.04900995 -0.05261765]
 [-0.0199      0.21003608 -0.019171   -0.0109    ]
 [-0.01673687  0.6861894   0.0071      0.        ]
 [ 0.          0.          0.          0.        ]]
```
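To read this table: each row is a state (numbered 0 to 24, left to right, top to bottom) and each column an action (up, right, down, left), so the argmax of a row is the greedy action in that state. The last row stays all zeros because the goal state is terminal: `learn` is never called with it as the current state. As a small illustration not in the original post, the learned policy can be printed as an arrow grid:

```python
# Render the greedy policy as a 5x5 arrow grid (assumes `agent` and
# `np` from the training script above are in scope).
arrows = ['^', '>', 'v', '<']  # up, right, down, left
size = 5
for row in range(size):
    cells = []
    for col in range(size):
        state = row * size + col
        cells.append(arrows[int(np.argmax(agent.q_table[state]))])
    print(' '.join(cells))
```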