Added basic bayes-mcts using beta distribution
main.py (15 changed lines)
@@ -1,7 +1,10 @@
import random
import chess
import chess.engine
import chess.pgn
from src.chesspp.classic_mcts import ClassicMcts
from src.chesspp.baysian_mcts import BayesianMcts
from src.chesspp.random_strategy import RandomStrategy
from src.chesspp import engine
from src.chesspp import util
from src.chesspp import simulation, eval
@@ -24,6 +27,18 @@ def test_mcts():
        print("move (mcts):", c.move, " with score:", c.score)


def test_bayes_mcts():
    global lookup_count
    fools_mate = "rnbqkbnr/pppp1ppp/4p3/8/5PP1/8/PPPPP2P/RNBQKBNR b KQkq f3 0 2"
    board = chess.Board(fools_mate)
    seed = 1
    strategy = RandomStrategy(random.Random(seed))
    mcts = BayesianMcts(board, strategy, seed)
    mcts.sample()
    for c in mcts.get_children():
        print("move (mcts):", c.move, " with score:", c.mu)


def test_stockfish():
    fools_mate = "rnbqkbnr/pppp1ppp/4p3/8/5PP1/8/PPPPP2P/RNBQKBNR b KQkq f3 0 2"
    board = chess.Board(fools_mate)
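The new test_bayes_mcts above runs a single batch of samples from a fixed position. As a minimal usage sketch (not part of the commit), the same API could drive a short self-play loop; the helper name play_a_few_moves and the rule of picking the child with the highest mu are assumptions, not something main.py does:

# Usage sketch only (not in the commit): drive BayesianMcts over a few plies.
import random
import chess
from src.chesspp.baysian_mcts import BayesianMcts
from src.chesspp.random_strategy import RandomStrategy


def play_a_few_moves(fen: str, plies: int = 4, seed: int = 1) -> chess.Board:
    board = chess.Board(fen)
    mcts = BayesianMcts(board, RandomStrategy(random.Random(seed)), seed)
    for _ in range(plies):
        if board.is_game_over():
            break
        mcts.sample(runs=200)  # fewer runs than the default 1000, just to keep the sketch fast
        children = mcts.get_children()
        if not children:
            break
        best = max(children, key=lambda c: c.mu)  # pick the child with the highest posterior mean
        mcts.apply_move(best.move)  # pushes the move on the shared board and re-roots the tree
    return board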
requirements.txt
@@ -1,4 +1,6 @@
chess==1.10.0
numpy==1.26.3
stockfish==3.28.0
torch==2.1.2
pytest
aiohttp
src/chesspp/baysian_mcts.py (new file, 145 lines)
@@ -0,0 +1,145 @@
import chess
from src.chesspp.i_mcts import *
from src.chesspp.i_strategy import IStrategy
from src.chesspp.util_gaussian import gaussian_ucb1, max_gaussian, beta_std, beta_mean
from src.chesspp import eval  # module import so that eval.score_manual() resolves in rollout()
import numpy as np
import math


class BayesianMctsNode(IMctsNode):
    def __init__(self, board: chess.Board, strategy: IStrategy, parent: Self | None, move: chess.Move | None,
                 random_state: random.Random, inherit_results: list[int] | None = None):
        super().__init__(board, strategy, parent, move, random_state)
        self.visits = 0
        # Beta pseudo-counts [alpha, beta]; children start from a copy of the parent's counts
        self.results = inherit_results.copy() if inherit_results is not None else [1, 1]

        self._set_mu_sigma()

    def _create_child(self, move: chess.Move):
        copied_board = self.board.copy()
        copied_board.push(move)
        return BayesianMctsNode(copied_board, self.strategy, self, move, self.random_state, inherit_results=self.results)

    def _set_mu_sigma(self):
        alpha = self.results[0]
        beta = self.results[1]

        self.mu = beta_mean(alpha, beta)
        self.sigma = beta_std(alpha, beta)
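For reference (not part of the committed file): _set_mu_sigma summarizes the node's Beta(alpha, beta) pseudo-counts by the standard mean and standard deviation, which beta_mean and beta_std in util_gaussian.py are presumably meant to compute:

\[
\mu = \frac{\alpha}{\alpha + \beta},
\qquad
\sigma = \sqrt{\frac{\alpha\beta}{(\alpha + \beta)^2\,(\alpha + \beta + 1)}}.
\]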
    def _select_child(self) -> IMctsNode:
        # select child by modified UCB1
        if self.board.is_game_over():
            return self

        best_child = self.random_state.choice(self.children)
        best_val = gaussian_ucb1(best_child.mu, best_child.sigma, self.visits)
        for c in self.children:
            g = gaussian_ucb1(c.mu, c.sigma, self.visits)

            if g > best_val:
                best_val = g
                best_child = c
        return best_child
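As implemented in gaussian_ucb1 (shown later in this commit), the selection score used here is a UCB1 variant in which the child's sigma takes the place of the usual per-child visit term; this is only a restatement of the code for reference, not part of the commit:

\[
\text{score}(c) = \mu_c + \sqrt{2\,\ln N \cdot \sigma_c},
\qquad\text{versus classical UCB1's } \mu_c + \sqrt{\tfrac{2\ln N}{n_c}},
\]

where N is the parent's visit count and n_c the child's visit count in the classical form.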
    def select(self) -> IMctsNode:
        if len(self.children) == 0:
            return self
        else:
            return self._select_child().select()

    def expand(self) -> IMctsNode:
        if self.visits == 0:
            return self

        for move in self.legal_moves:
            self.children.append(self._create_child(move))

        return self._select_child()
    def rollout(self, rollout_depth: int = 20) -> int:
        copied_board = self.board.copy()
        steps = 1
        for i in range(rollout_depth):
            if copied_board.is_game_over():
                break

            m = self.strategy.pick_next_move(copied_board)
            if m is None:
                break

            copied_board.push(m)
            steps += 1

        # normalise the manual score by the number of simulated plies
        score = eval.score_manual(copied_board) // steps
        if score > 0:
            self.results[1] += 1                      # positive score: one pseudo-count on beta
        else:
            self.results[0] += abs(score) // 50_000   # negative score: |score| // 50_000 pseudo-counts on alpha
        return score
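To make the pseudo-count update in rollout() concrete, here is a small standalone illustration (not part of the commit; the score value is hypothetical):

from src.chesspp.util_gaussian import beta_mean

results = [1, 1]                        # fresh node: Beta(1, 1), mean 0.5
score = -150_000                        # hypothetical manual score
if score > 0:
    results[1] += 1                     # positive score: one pseudo-count on beta
else:
    results[0] += abs(score) // 50_000  # negative score: adds 3 pseudo-counts to alpha here
print(beta_mean(*results))              # 4 / (4 + 1) = 0.8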
    def backpropagate(self, score: int | None = None) -> None:
        self.visits += 1

        if score is not None:
            # note: sample() calls backpropagate() without a score, so this branch is normally not taken
            self.results.append(score)

        if len(self.children) == 0:
            # leaf node
            self._set_mu_sigma()
        else:
            # interior node: summarise the children by the (approximate) max of their Gaussians
            shuffled_children = self.random_state.sample(self.children, len(self.children))
            max_mu = shuffled_children[0].mu
            max_sigma = shuffled_children[0].sigma
            for c in shuffled_children[1:]:
                max_mu, max_sigma = max_gaussian(max_mu, max_sigma, c.mu, c.sigma)

            if max_sigma == 0:
                max_sigma = 0.001
            self.mu = max_mu
            self.sigma = max_sigma

        if self.parent:
            self.parent.backpropagate()
    def print(self, indent=0):
        print("\t"*indent + f"visits={self.visits}, mu={self.mu}, sigma={self.sigma}")
        for c in self.children:
            c.print(indent+1)


class BayesianMcts(IMcts):
    def __init__(self, board: chess.Board, strategy: IStrategy, seed: int | None = None):
        super().__init__(board, strategy, seed)
        self.root = BayesianMctsNode(board, strategy, None, None, self.random_state)
        self.root.visits += 1
    def sample(self, runs: int = 1000) -> None:
        for i in range(runs):
            # print(f"sample {i}")
            leaf_node = self.root.select().expand()
            _ = leaf_node.rollout()
            leaf_node.backpropagate()
        # self.root.print()
    def apply_move(self, move: chess.Move) -> None:
        self.board.push(move)

        # if a child node contains the move, set this child as the new root
        for child in self.get_children():
            if child.move == move:
                self.root = child
                self.root.parent = None
                return

        # if no child node contains the move, initialize a new tree.
        self.root = BayesianMctsNode(self.board, self.root.strategy, None, None, self.random_state)

    def get_children(self) -> list[IMctsNode]:
        return self.root.children

    def print(self):
        print("================================")
        self.root.print()
src/chesspp/classic_mcts.py
@@ -3,8 +3,8 @@ import random
import numpy as np

-from chesspp import eval
-from chesspp import util
+from src.chesspp import eval
+from src.chesspp import util


class ClassicMcts:
src/chesspp/engine.py
@@ -3,7 +3,7 @@ import chess
import chess.engine
import random
import time
-from chesspp.classic_mcts import ClassicMcts
+from src.chesspp.classic_mcts import ClassicMcts

class Limit:
    """ Class to determine when to stop searching for moves """
src/chesspp/i_mcts.py
@@ -1,13 +1,61 @@
import chess
import random
from abc import ABC, abstractmethod
-from typing import Dict
-from chesspp.i_strategy import IStrategy
+from typing import Dict, Self
+from src.chesspp.i_strategy import IStrategy


class IMctsNode(ABC):
    def __init__(self, board: chess.Board, strategy: IStrategy, parent: Self | None, move: chess.Move | None,
                 random_state: random.Random):
        self.board = board
        self.strategy = strategy
        self.parent = parent
        self.children = []
        self.move = move
        self.legal_moves = list(board.legal_moves)
        self.random_state = random_state

    @abstractmethod
    def select(self) -> Self:
        """
        Selects the next leaf node in the tree.
        :return: the selected leaf node
        """
        pass

    @abstractmethod
    def expand(self) -> Self:
        """
        Expands this node by creating child leaf nodes, i.e., chooses an action and applies it to the board.
        :return: the child node selected for the next rollout
        """
        pass

    @abstractmethod
    def rollout(self, rollout_depth: int = 20) -> int:
        """
        Rolls out the node by simulating a game for a given depth.
        Sometimes this step is called 'simulation' or 'playout'.
        :return: the score of the rolled-out game
        """
        pass

    @abstractmethod
    def backpropagate(self, score: float) -> None:
        """
        Backpropagates the results of the rollout.
        :param score: the rollout score to propagate
        :return:
        """
        pass


class IMcts(ABC):

-    def __init__(self, board: chess.Board, strategy: IStrategy):
+    def __init__(self, board: chess.Board, strategy: IStrategy, seed: int | None):
        self.board = board
        self.strategy = strategy
+        self.random_state = random.Random(seed)

    @abstractmethod
    def sample(self, runs: int = 1000) -> None:
@@ -28,7 +76,7 @@ class IMcts(ABC):
        pass

    @abstractmethod
-    def get_children(self) -> list['IMcts']:
+    def get_children(self) -> list[IMctsNode]:
        """
        Return the immediate children of the root node
        :return: list of immediate children of mcts root
src/chesspp/i_strategy.py
@@ -1,8 +1,11 @@
from abc import ABC, abstractmethod

import chess


# TODO extend class
class IStrategy(ABC):

    @abstractmethod
-    def pick_next_move(self, ):
+    def pick_next_move(self, board: chess.Board) -> chess.Move:
        pass
src/chesspp/random_strategy.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import chess
import random
from src.chesspp.i_strategy import IStrategy


class RandomStrategy(IStrategy):
    def __init__(self, random_state: random.Random):
        self.random_state = random_state

    def pick_next_move(self, board: chess.Board) -> chess.Move | None:
        if len(list(board.legal_moves)) == 0:
            return None
        return self.random_state.choice(list(board.legal_moves))
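RandomStrategy is the only IStrategy implementation added by this commit. Purely as an illustration of the interface (hypothetical, not in the repository), a rollout policy that prefers captures could look like this:

# Hypothetical example only: pick a random capture if one exists, otherwise any legal move.
import chess
import random
from src.chesspp.i_strategy import IStrategy


class CaptureFirstStrategy(IStrategy):
    def __init__(self, random_state: random.Random):
        self.random_state = random_state

    def pick_next_move(self, board: chess.Board) -> chess.Move | None:
        moves = list(board.legal_moves)
        if not moves:
            return None
        captures = [m for m in moves if board.is_capture(m)]
        return self.random_state.choice(captures or moves)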
src/chesspp/simulation.py
@@ -6,7 +6,7 @@ from typing import Tuple, List
from enum import Enum
from dataclasses import dataclass

-from chesspp.engine import Engine, Limit
+from src.chesspp.engine import Engine, Limit


class Winner(Enum):
src/chesspp/util_gaussian.py (new file, 83 lines)
@@ -0,0 +1,83 @@
import math

import torch
import torch.distributions as dist
from torch import exp

# memoisation caches keyed by alpha, plus a counter for cache hits
F1: dict[float, float] = {}
F2: dict[float, float] = {}
CDF: dict[float, float] = {}
lookup_count = 0


def max_gaussian_numeric(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
    # placeholder for a numeric version; not implemented in this commit
    pass


def max_gaussian(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
    """
    Returns the moment-matched max of two Gaussians given by mu1, sigma1, mu2, sigma2.
    :param mu1: mu of the first Gaussian
    :param sigma1: sigma of the first Gaussian
    :param mu2: mu of the second Gaussian
    :param sigma2: sigma of the second Gaussian
    """
    global lookup_count
    global F1
    global F2
    global CDF

    # we assume independence of the two Gaussians
    try:
        # print(mu1, sigma1, mu2, sigma2)
        normal = dist.Normal(0, 1)
        sigma_m = math.sqrt(sigma1 ** 2 + sigma2 ** 2)
        alpha = (mu1 - mu2) / sigma_m

        if alpha in CDF:
            cdf_alpha = CDF[alpha]
            lookup_count += 1
        else:
            cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
            CDF[alpha] = cdf_alpha

        pdf_alpha = exp(normal.log_prob(torch.tensor(alpha))).item()

        if alpha in F1:
            f1_alpha = F1[alpha]
            lookup_count += 1
        else:
            f1_alpha = alpha * cdf_alpha + pdf_alpha
            F1[alpha] = f1_alpha

        if alpha in F2:
            f2_alpha = F2[alpha]
            lookup_count += 1
        else:
            f2_alpha = alpha ** 2 * cdf_alpha * (1 - cdf_alpha) + (
                    1 - 2 * cdf_alpha) * alpha * pdf_alpha - pdf_alpha ** 2
            F2[alpha] = f2_alpha

        # first moment of max(X1, X2); sigma then follows from the second moment
        mu = mu2 + sigma_m * f1_alpha
        # sigma_old = sigma2 ** 2 + (sigma1 ** 2 - sigma2 ** 2) * cdf_alpha + sigma_m ** 2 * f2_alpha
        sigma = math.sqrt((mu1**2 + sigma1**2) * cdf_alpha + (mu2**2 + sigma2**2) * (1 - cdf_alpha) + (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)

        return mu, sigma
    except ValueError:
        print(mu1, sigma1, mu2, sigma2)
        exit(1)
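The mu and sigma computed in max_gaussian match the standard moment-matching approximation for the maximum Z = max(X1, X2) of two independent Gaussians (Clark's method); this note is added for reference only. With alpha = (mu1 - mu2) / sigma_m, sigma_m = sqrt(sigma1^2 + sigma2^2), and Phi, phi the standard normal CDF and density:

\[
\mathbb{E}[Z] = \mu_1\,\Phi(\alpha) + \mu_2\,\Phi(-\alpha) + \sigma_m\,\varphi(\alpha),
\]
\[
\mathbb{E}[Z^2] = (\mu_1^2 + \sigma_1^2)\,\Phi(\alpha) + (\mu_2^2 + \sigma_2^2)\,\Phi(-\alpha) + (\mu_1 + \mu_2)\,\sigma_m\,\varphi(\alpha),
\qquad
\sigma_Z = \sqrt{\mathbb{E}[Z^2] - \mathbb{E}[Z]^2}.
\]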
def beta_mean(alpha, beta):
    return alpha / (alpha + beta)


def beta_std(alpha, beta):
    try:
        # standard deviation of Beta(alpha, beta): sqrt(alpha*beta / ((alpha+beta)^2 * (alpha+beta+1)))
        return math.sqrt((alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1)))
    except ZeroDivisionError:
        print(alpha, beta)


def gaussian_ucb1(mu, sigma, N) -> float:
    # modified UCB1: the usual per-child visit term is replaced by the child's sigma
    return mu + math.sqrt(2 * math.log(N) * sigma)
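As a quick standalone sanity check (not part of the commit), the moment-matched values from max_gaussian can be compared against a Monte Carlo estimate:

# Compare the moment-matched approximation with an empirical estimate.
import torch
from src.chesspp.util_gaussian import max_gaussian

mu, sigma = max_gaussian(0.0, 1.0, 0.5, 2.0)
z = torch.maximum(torch.normal(0.0, 1.0, (100_000,)),
                  torch.normal(0.5, 2.0, (100_000,)))
print(mu, sigma)                        # moment-matched mean / std
print(z.mean().item(), z.std().item())  # empirical values, should be close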