### demo

>>> m = Fig[17,1]

>>> pi = best_policy(m, value_iteration(m, .01))

>>> pi
{(3, 2): None, (3, 1): None, (3, 0): (-1, 0), (2, 1): (0, 1), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (0, 1), (1, 2): (1, 0), (2, 0): (0, 1), (0, 1): (0, 1), (2, 2): (1, 0)}

>>> m.to_arrows(pi)
[['>', '>', '>', '.'], ['^', None, '^', '.'], ['^', '>', '^', '<']]

>>> print_table(m.to_arrows(pi))
>   >      >   .
^   None   ^   .
^   >      ^   <

>>> value_iteration(m, .01)
{(3, 2): 1.0, (3, 1): -1.0, (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, (2, 2): 0.79536093684710951}

>>> policy_iteration(m)
{(3, 2): None, (3, 1): None, (3, 0): (0, -1), (2, 1): (-1, 0), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), (0, 1): (1, 0), (2, 2): (1, 0)}

>>> print_table(m.to_arrows(policy_iteration(m)))
>   >      >   .
>   None   <   .
>   >      >   v