From 0bc58fe9b7c3a56e57c60cfa921c987f43d0d219 Mon Sep 17 00:00:00 2001 From: Theo Haslinger Date: Sun, 28 Jan 2024 17:23:06 +0100 Subject: [PATCH] Added min/max score metric to bays-mcts --- main.py | 13 ++-- src/chesspp/baysian_mcts.py | 111 ++++++++++++++++++++++------------- src/chesspp/eval.py | 8 ++- src/chesspp/util_gaussian.py | 86 +++++++++++++++++---------- 4 files changed, 136 insertions(+), 82 deletions(-) diff --git a/main.py b/main.py index c8484b6..9b34010 100644 --- a/main.py +++ b/main.py @@ -31,12 +31,13 @@ def test_bayes_mcts(): global lookup_count fools_mate = "rnbqkbnr/pppp1ppp/4p3/8/5PP1/8/PPPPP2P/RNBQKBNR b KQkq f3 0 2" board = chess.Board(fools_mate) - seed = 1 - stategy = RandomStrategy(random.Random(seed)) - mcts = BayesianMcts(board, stategy, seed) + seed = None + strategy = RandomStrategy(random.Random(seed)) + mcts = BayesianMcts(board, strategy, chess.BLACK, seed) mcts.sample() - for c in mcts.get_children(): - print("move (mcts):", c.move, " with score:", c.mu) + mcts.print() + for move, score in mcts.get_moves().items(): + print("move (mcts):", move, " with score:", score) def test_stockfish(): @@ -96,7 +97,7 @@ def main(): # test_mcts() # test_stockfish() # test_stockfish_prob() - + test_bayes_mcts() if __name__ == '__main__': main() diff --git a/src/chesspp/baysian_mcts.py b/src/chesspp/baysian_mcts.py index 30f9213..69ef14e 100644 --- a/src/chesspp/baysian_mcts.py +++ b/src/chesspp/baysian_mcts.py @@ -1,53 +1,69 @@ import chess from src.chesspp.i_mcts import * from src.chesspp.i_strategy import IStrategy -from src.chesspp.util_gaussian import gaussian_ucb1, max_gaussian, beta_std, beta_mean -from src.chesspp.eval import * +from src.chesspp.util_gaussian import gaussian_ucb1, max_gaussian, min_gaussian +from src.chesspp.eval import score_manual import numpy as np import math class BayesianMctsNode(IMctsNode): - def __init__(self, board: chess.Board, strategy: IStrategy, parent: Self | None, move: chess.Move | None, - random_state: random.Random, inherit_results: list[int] | None = None): + def __init__(self, board: chess.Board, strategy: IStrategy, color: chess.Color, parent: Self | None, move: chess.Move | None, + random_state: random.Random, inherit_result: int | None = None, depth: int = 0): super().__init__(board, strategy, parent, move, random_state) + self.color = color # Color of the player whose turn it is self.visits = 0 - self.results = inherit_results.copy() if inherit_results is not None else [1, 1] - + self.result = inherit_result if inherit_result is not None else 0 self._set_mu_sigma() + self.depth = depth - def _create_child(self, move: chess.Move): + def _create_child(self, move: chess.Move) -> IMctsNode: copied_board = self.board.copy() copied_board.push(move) - return BayesianMctsNode(copied_board, self.strategy, self, move, self.random_state, inherit_results=self.results) + return BayesianMctsNode(copied_board, self.strategy, not self.color, self, move, self.random_state, self.result, self.depth+1) - def _set_mu_sigma(self): - alpha = self.results[0] - beta = self.results[1] + def _set_mu_sigma(self) -> None: + self.mu = self.result + self.sigma = 1 - self.mu = beta_mean(alpha, beta) - self.sigma = beta_std(alpha, beta) + def _is_new_ucb1_better(self, current, new) -> bool: + if self.color == chess.WHITE: + # maximize ucb1 + return new > current + else: + # minimize ubc1 + return new < current + + def _select_best_child(self) -> IMctsNode: + """ + Returns the child with the *best* ucb1 score. 
+        It chooses the child with the maximum ucb1 for WHITE and the minimum ucb1 for BLACK.
+        """
 
-    def _select_child(self) -> IMctsNode:
-        # select child by modified UCB1
         if self.board.is_game_over():
             return self
 
         best_child = self.random_state.choice(self.children)
-        best_val = gaussian_ucb1(best_child.mu, best_child.sigma, self.visits)
-        for c in self.children:
-            g = gaussian_ucb1(c.mu, c.sigma, self.visits)
+        best_ucb1 = gaussian_ucb1(best_child.mu, best_child.sigma, self.visits)
+        for child in self.children:
+            # if the child has no visits, prioritize this child.
+            if child.visits == 0:
+                best_child = child
+                break
+
+            # save the child if it has a *better* score than our previous best child.
+            ucb1 = gaussian_ucb1(child.mu, child.sigma, self.visits)
+            if self._is_new_ucb1_better(best_ucb1, ucb1):
+                best_ucb1 = ucb1
+                best_child = child
 
-            if g > best_val:
-                best_val = g
-                best_child = c
         return best_child
 
     def select(self) -> IMctsNode:
         if len(self.children) == 0:
             return self
         else:
-            return self._select_child().select()
+            return self._select_best_child().select()
 
     def expand(self) -> IMctsNode:
         if self.visits == 0:
@@ -56,11 +72,11 @@ class BayesianMctsNode(IMctsNode):
         for move in self.legal_moves:
             self.children.append(self._create_child(move))
 
-        return self._select_child()
+        return self._select_best_child()
 
     def rollout(self, rollout_depth: int = 20) -> int:
         copied_board = self.board.copy()
-        steps = 1
+        steps = self.depth
         for i in range(rollout_depth):
             if copied_board.is_game_over():
                 break
@@ -72,18 +88,21 @@ class BayesianMctsNode(IMctsNode):
             copied_board.push(m)
             steps += 1
 
-        score = eval.score_manual(copied_board) // steps
-        if score > 0:
-            self.results[1] += 1
-        else:
-            self.results[0] += abs(score) // 50_000
+        score = score_manual(copied_board) // steps
+        self.result = score
         return score
 
+    def _combine_gaussians(self, mu1: float, sigma1: float, mu2: float, sigma2: float) -> tuple[float, float]:
+        if self.color == chess.WHITE:
+            return max_gaussian(mu1, sigma1, mu2, sigma2)
+        else:
+            return min_gaussian(mu1, sigma1, mu2, sigma2)
+
     def backpropagate(self, score: int | None = None) -> None:
         self.visits += 1
         if score is not None:
-            self.results.append(score)
+            self.result = score
 
         if len(self.children) == 0:
             # leaf node
@@ -91,30 +110,32 @@ class BayesianMctsNode(IMctsNode):
         else:
             # interior node
             shuffled_children = self.random_state.sample(self.children, len(self.children))
-            max_mu = shuffled_children[0].mu
-            max_sigma = shuffled_children[0].sigma
+            mu = shuffled_children[0].mu
+            sigma = shuffled_children[0].sigma
             for c in shuffled_children[1:]:
-                max_mu, max_sigma = max_gaussian(max_mu, max_sigma, c.mu, c.sigma)
+                mu, sigma = self._combine_gaussians(mu, sigma, c.mu, c.sigma)
 
-            if max_sigma == 0:
-                max_sigma = 0.001
-            self.mu = max_mu
-            self.sigma = max_sigma
+            # if max_sigma == 0:
+            #     max_sigma = 0.001
+            self.mu = mu
+            self.sigma = sigma
 
         if self.parent:
             self.parent.backpropagate()
 
     def print(self, indent=0):
-        print("\t"*indent + f"visits={self.visits}, mu={self.mu}, sigma={self.sigma}")
+        print("\t"*indent + f"move={self.move}, visits={self.visits}, mu={self.mu}, sigma={self.sigma}")
         for c in self.children:
             c.print(indent+1)
 
 
 class BayesianMcts(IMcts):
-    def __init__(self, board: chess.Board, strategy: IStrategy, seed: int | None = None):
+
+    def __init__(self, board: chess.Board, strategy: IStrategy, color: chess.Color, seed: int | None = None):
         super().__init__(board, strategy, seed)
-        self.root = BayesianMctsNode(board, strategy, None, None, self.random_state)
+        self.root = BayesianMctsNode(board, strategy, color, None, None, self.random_state)
         self.root.visits += 1
+        self.color = color
 
     def sample(self, runs: int = 1000) -> None:
         for i in range(runs):
@@ -122,10 +143,10 @@ class BayesianMcts(IMcts):
             leaf_node = self.root.select().expand()
             _ = leaf_node.rollout()
             leaf_node.backpropagate()
-        #self.root.print()
 
     def apply_move(self, move: chess.Move) -> None:
         self.board.push(move)
+        self.color = not self.color
 
         # if a child node contains the move, set this child as new root
         for child in self.get_children():
@@ -135,11 +156,17 @@ class BayesianMcts(IMcts):
             return
 
         # if no child node contains the move, initialize a new tree.
-        self.root = BayesianMctsNode(self.board, self.root.strategy, None, None, self.random_state)
+        self.root = BayesianMctsNode(self.board, self.root.strategy, self.color, None, None, self.random_state)
 
     def get_children(self) -> list[IMctsNode]:
         return self.root.children
 
+    def get_moves(self) -> Dict[chess.Move, int]:
+        res = {}
+        for c in self.root.children:
+            res[c.move] = c.mu
+        return res
+
     def print(self):
         print("================================")
         self.root.print()
\ No newline at end of file
diff --git a/src/chesspp/eval.py b/src/chesspp/eval.py
index 833fad7..1c91ddd 100644
--- a/src/chesspp/eval.py
+++ b/src/chesspp/eval.py
@@ -140,14 +140,16 @@ def check_endgame(board: chess.Board) -> bool:
 
 def score_manual(board: chess.Board) -> int:
     """
-    Calculate the score of the given board regarding the given color
+    Calculate the score of a given board.
+    Positive scores indicate an advantage for WHITE, negative scores indicate an advantage for BLACK.
+    The range of scores is approximately -1,100,000 to 1,100,000.
     :param board: the chess board
-    :return: score metric
+    :return: score
     """
     outcome = board.outcome()
     if outcome is not None:
         if outcome.termination == chess.Termination.CHECKMATE:
-            return sys.maxsize if outcome.winner == chess.WHITE else -sys.maxsize
+            return 1_100_000 if outcome.winner == chess.WHITE else -1_100_000
         else:
             # draw
             return 0
diff --git a/src/chesspp/util_gaussian.py b/src/chesspp/util_gaussian.py
index 41c15de..e6696d3 100644
--- a/src/chesspp/util_gaussian.py
+++ b/src/chesspp/util_gaussian.py
@@ -10,11 +10,12 @@ CDF: dict[float, float] = {}
 lookup_count = 0
 
 
-def max_gaussian_numeric(mu1, sigma1, mu2, sigma2) -> (float, float):
-    pass
+def get_lookup_count():
+    global lookup_count
+    return lookup_count
 
 
-def max_gaussian(mu1, sigma1, mu2, sigma2) -> (float, float):
+def max_gaussian(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
     global lookup_count
     global F1
     global F2
@@ -26,46 +27,69 @@
     :param sigma1: sigma of the first Gaussian
     :param mu2: mu of the second Gaussian
     :param sigma2: sigma of the second Gaussian
+    :return: mu and sigma maximized
     """
     # we assume independence of the two gaussians
+    #print(mu1, sigma1, mu2, sigma2)
+    normal = dist.Normal(0, 1)
+    sigma_m = math.sqrt(sigma1 ** 2 + sigma2 ** 2)
+    alpha = (mu1 - mu2) / sigma_m
+
+    if alpha in CDF:
+        cdf_alpha = CDF[alpha]
+        lookup_count += 1
+    else:
+        cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
+        CDF[alpha] = cdf_alpha
+
+    pdf_alpha = exp(normal.log_prob(torch.tensor(alpha))).item()
+
+    if alpha in F1:
+        f1_alpha = F1[alpha]
+        lookup_count += 1
+    else:
+        f1_alpha = alpha * cdf_alpha + pdf_alpha
+        F1[alpha] = f1_alpha
+
+    if alpha in F2:
+        f2_alpha = F2[alpha]
+        lookup_count += 1
+    else:
+        f2_alpha = alpha ** 2 * cdf_alpha * (1 - cdf_alpha) + (
+            1 - 2 * cdf_alpha) * alpha * pdf_alpha - pdf_alpha ** 2
+        F2[alpha] = f2_alpha
+
+    mu = mu2 + sigma_m * f1_alpha
+    sigma = math.sqrt(sigma2 ** 2 + (sigma1 ** 2 - sigma2 ** 2) * cdf_alpha + sigma_m ** 2 * f2_alpha)
+    #sigma = math.sqrt((mu1**2 + sigma1**2) * cdf_alpha + (mu2**2 + sigma2**2) * (1 - cdf_alpha) + (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
+
+    return mu, sigma
+
+
+def min_gaussian(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
+    """
+    Returns the combined min gaussian of two Gaussians represented by mu1, sigma1, mu2, sigma2
+    :param mu1: mu of the first Gaussian
+    :param sigma1: sigma of the first Gaussian
+    :param mu2: mu of the second Gaussian
+    :param sigma2: sigma of the second Gaussian
+    :return: mu and sigma minimized
+    """
     try:
-        #print(mu1, sigma1, mu2, sigma2)
         normal = dist.Normal(0, 1)
         sigma_m = math.sqrt(sigma1 ** 2 + sigma2 ** 2)
         alpha = (mu1 - mu2) / sigma_m
 
-        if alpha in CDF:
-            cdf_alpha = CDF[alpha]
-            lookup_count += 1
-        else:
-            cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
-            CDF[alpha] = cdf_alpha
-
+        cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
         pdf_alpha = exp(normal.log_prob(torch.tensor(alpha))).item()
+        pdf_alpha_neg = exp(normal.log_prob(torch.tensor(-alpha))).item()
 
-        if alpha in F1:
-            f1_alpha = F1[alpha]
-            lookup_count += 1
-        else:
-            f1_alpha = alpha * cdf_alpha + pdf_alpha
-            F1[alpha] = f1_alpha
-
-        if alpha in F2:
-            f2_alpha = F2[alpha]
-            lookup_count += 1
-        else:
-            f2_alpha = alpha ** 2 * cdf_alpha * (1 - cdf_alpha) + (
-                1 - 2 * cdf_alpha) * alpha * pdf_alpha - pdf_alpha ** 2
-            F2[alpha] = f2_alpha
-
-        mu = mu2 + sigma_m * f1_alpha
-        #sigma_old = sigma2 ** 2 + (sigma1 ** 2 - sigma2 ** 2) * cdf_alpha + sigma_m ** 2 * f2_alpha
-        sigma = math.sqrt((mu1**2 + sigma1**2) * cdf_alpha + (mu2**2 + sigma2**2) * (1 - cdf_alpha) + (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
-
+        mu = mu1 * (1 - cdf_alpha) + mu2 * cdf_alpha - pdf_alpha_neg * sigma_m
+        sigma = math.sqrt((mu1**2 + sigma1**2) * (1 - cdf_alpha) + (mu2**2 + sigma2**2) * cdf_alpha - (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
         return mu, sigma
     except ValueError:
         print(mu1, sigma1, mu2, sigma2)
-        exit(1)
+
 
 def beta_mean(alpha, beta):
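
Note (not part of the patch): a quick Monte Carlo sanity check for max_gaussian and the new min_gaussian. This is only a sketch, assuming the repository's own dependencies (torch, numpy) are installed and the package is importable as src.chesspp; the sample values for mu1, sigma1, mu2, sigma2 are illustrative.

    import numpy as np
    from src.chesspp.util_gaussian import max_gaussian, min_gaussian

    rng = np.random.default_rng(0)
    mu1, sigma1, mu2, sigma2 = 0.3, 1.0, -0.2, 0.5

    # Draw independent samples from the two Gaussians and combine them elementwise.
    x = rng.normal(mu1, sigma1, size=1_000_000)
    y = rng.normal(mu2, sigma2, size=1_000_000)

    # The empirical mean/std of max(X, Y) and min(X, Y) should closely match the
    # analytic (mu, sigma) pairs returned by max_gaussian and min_gaussian.
    print("max:", np.maximum(x, y).mean(), np.maximum(x, y).std(),
          max_gaussian(mu1, sigma1, mu2, sigma2))
    print("min:", np.minimum(x, y).mean(), np.minimum(x, y).std(),
          min_gaussian(mu1, sigma1, mu2, sigma2))

Both functions compute the first two moments of the max/min of two independent Gaussians and treat the result as Gaussian again (moment matching), so the empirical and analytic values should agree to within Monte Carlo error.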