Added min/max score metric to bayes-mcts

Theo Haslinger
2024-01-28 17:23:06 +01:00
committed by Theo Haslinger
parent 2662dbf53a
commit 0bc58fe9b7
4 changed files with 136 additions and 82 deletions

main.py

@@ -31,12 +31,13 @@ def test_bayes_mcts():
     global lookup_count
     fools_mate = "rnbqkbnr/pppp1ppp/4p3/8/5PP1/8/PPPPP2P/RNBQKBNR b KQkq f3 0 2"
     board = chess.Board(fools_mate)
-    seed = 1
-    stategy = RandomStrategy(random.Random(seed))
-    mcts = BayesianMcts(board, stategy, seed)
+    seed = None
+    strategy = RandomStrategy(random.Random(seed))
+    mcts = BayesianMcts(board, strategy, chess.BLACK, seed)
     mcts.sample()
-    for c in mcts.get_children():
-        print("move (mcts):", c.move, " with score:", c.mu)
+    mcts.print()
+    for move, score in mcts.get_moves().items():
+        print("move (mcts):", move, " with score:", score)


 def test_stockfish():

@@ -96,7 +97,7 @@ def main():
     # test_mcts()
     # test_stockfish()
     # test_stockfish_prob()
+    test_bayes_mcts()


 if __name__ == '__main__':
     main()
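For orientation, the updated test above drives the new API end to end. The following is a minimal usage sketch of the same flow; the two import paths are assumptions (this diff does not show which modules define BayesianMcts and RandomStrategy), everything else mirrors the test code.

import random

import chess

from src.chesspp.bayesian_mcts import BayesianMcts  # assumed module path
from src.chesspp.strategy import RandomStrategy      # assumed module path

fools_mate = "rnbqkbnr/pppp1ppp/4p3/8/5PP1/8/PPPPP2P/RNBQKBNR b KQkq f3 0 2"
board = chess.Board(fools_mate)
strategy = RandomStrategy(random.Random(None))

# The constructor now takes the side to move; scores are propagated with
# max_gaussian for WHITE nodes and min_gaussian for BLACK nodes.
mcts = BayesianMcts(board, strategy, chess.BLACK, seed=None)
mcts.sample()  # 1000 runs by default

# get_moves() maps each legal root move to the mu of the corresponding child.
# Positive scores favor WHITE and negative favor BLACK, so BLACK picks the minimum.
moves = mcts.get_moves()
best_move = min(moves, key=moves.get)
print("best move for black:", best_move, "score:", moves[best_move])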


@@ -1,53 +1,69 @@
 import chess
 from src.chesspp.i_mcts import *
 from src.chesspp.i_strategy import IStrategy
-from src.chesspp.util_gaussian import gaussian_ucb1, max_gaussian, beta_std, beta_mean
-from src.chesspp.eval import *
+from src.chesspp.util_gaussian import gaussian_ucb1, max_gaussian, min_gaussian
+from src.chesspp.eval import score_manual
 import numpy as np
 import math


 class BayesianMctsNode(IMctsNode):
-    def __init__(self, board: chess.Board, strategy: IStrategy, parent: Self | None, move: chess.Move | None,
-                 random_state: random.Random, inherit_results: list[int] | None = None):
+    def __init__(self, board: chess.Board, strategy: IStrategy, color: chess.Color, parent: Self | None, move: chess.Move | None,
+                 random_state: random.Random, inherit_result: int | None = None, depth: int = 0):
         super().__init__(board, strategy, parent, move, random_state)
+        self.color = color  # Color of the player whose turn it is
         self.visits = 0
-        self.results = inherit_results.copy() if inherit_results is not None else [1, 1]
+        self.result = inherit_result if inherit_result is not None else 0
         self._set_mu_sigma()
+        self.depth = depth

-    def _create_child(self, move: chess.Move):
+    def _create_child(self, move: chess.Move) -> IMctsNode:
         copied_board = self.board.copy()
         copied_board.push(move)
-        return BayesianMctsNode(copied_board, self.strategy, self, move, self.random_state, inherit_results=self.results)
+        return BayesianMctsNode(copied_board, self.strategy, not self.color, self, move, self.random_state, self.result, self.depth+1)

-    def _set_mu_sigma(self):
-        alpha = self.results[0]
-        beta = self.results[1]
-        self.mu = beta_mean(alpha, beta)
-        self.sigma = beta_std(alpha, beta)
+    def _set_mu_sigma(self) -> None:
+        self.mu = self.result
+        self.sigma = 1
+
+    def _is_new_ucb1_better(self, current, new) -> bool:
+        if self.color == chess.WHITE:
+            # maximize ucb1
+            return new > current
+        else:
+            # minimize ucb1
+            return new < current

-    def _select_child(self) -> IMctsNode:
-        # select child by modified UCB1
+    def _select_best_child(self) -> IMctsNode:
+        """
+        Returns the child with the *best* ucb1 score.
+        It chooses the child with maximum ucb1 for WHITE, and with minimum ucb1 for BLACK.
+        """
         if self.board.is_game_over():
             return self
         best_child = self.random_state.choice(self.children)
-        best_val = gaussian_ucb1(best_child.mu, best_child.sigma, self.visits)
-        for c in self.children:
-            g = gaussian_ucb1(c.mu, c.sigma, self.visits)
-            if g > best_val:
-                best_val = g
-                best_child = c
+        best_ucb1 = gaussian_ucb1(best_child.mu, best_child.sigma, self.visits)
+        for child in self.children:
+            # if child has no visits, prioritize this child.
+            if child.visits == 0:
+                best_child = child
+                break
+            # save child if it has a *better* score than our previous best child.
+            ucb1 = gaussian_ucb1(child.mu, child.sigma, self.visits)
+            if self._is_new_ucb1_better(best_ucb1, ucb1):
+                best_ucb1 = ucb1
+                best_child = child
         return best_child

     def select(self) -> IMctsNode:
         if len(self.children) == 0:
             return self
         else:
-            return self._select_child().select()
+            return self._select_best_child().select()

     def expand(self) -> IMctsNode:
         if self.visits == 0:
@@ -56,11 +72,11 @@ class BayesianMctsNode(IMctsNode):
         for move in self.legal_moves:
             self.children.append(self._create_child(move))
-        return self._select_child()
+        return self._select_best_child()

     def rollout(self, rollout_depth: int = 20) -> int:
         copied_board = self.board.copy()
-        steps = 1
+        steps = self.depth
         for i in range(rollout_depth):
             if copied_board.is_game_over():
                 break
@@ -72,18 +88,21 @@ class BayesianMctsNode(IMctsNode):
             copied_board.push(m)
             steps += 1
-        score = eval.score_manual(copied_board) // steps
-        if score > 0:
-            self.results[1] += 1
-        else:
-            self.results[0] += abs(score) // 50_000
+        score = score_manual(copied_board) // steps
+        self.result = score
         return score

+    def _combine_gaussians(self, mu1: float, sigma1: float, mu2: float, sigma2: float) -> tuple[float, float]:
+        if self.color == chess.WHITE:
+            return max_gaussian(mu1, sigma1, mu2, sigma2)
+        else:
+            return min_gaussian(mu1, sigma1, mu2, sigma2)
+
     def backpropagate(self, score: int | None = None) -> None:
         self.visits += 1
         if score is not None:
-            self.results.append(score)
+            self.result = score

         if len(self.children) == 0:
             # leaf node
@@ -91,30 +110,32 @@ class BayesianMctsNode(IMctsNode):
         else:
             # interior node
             shuffled_children = self.random_state.sample(self.children, len(self.children))
-            max_mu = shuffled_children[0].mu
-            max_sigma = shuffled_children[0].sigma
+            mu = shuffled_children[0].mu
+            sigma = shuffled_children[0].sigma
             for c in shuffled_children[1:]:
-                max_mu, max_sigma = max_gaussian(max_mu, max_sigma, c.mu, c.sigma)
-            if max_sigma == 0:
-                max_sigma = 0.001
-            self.mu = max_mu
-            self.sigma = max_sigma
+                mu, sigma = self._combine_gaussians(mu, sigma, c.mu, c.sigma)
+            # if max_sigma == 0:
+            #     max_sigma = 0.001
+            self.mu = mu
+            self.sigma = sigma

         if self.parent:
             self.parent.backpropagate()

     def print(self, indent=0):
-        print("\t"*indent + f"visits={self.visits}, mu={self.mu}, sigma={self.sigma}")
+        print("\t"*indent + f"move={self.move}, visits={self.visits}, mu={self.mu}, sigma={self.sigma}")
         for c in self.children:
             c.print(indent+1)


 class BayesianMcts(IMcts):
-    def __init__(self, board: chess.Board, strategy: IStrategy, seed: int | None = None):
+    def __init__(self, board: chess.Board, strategy: IStrategy, color: chess.Color, seed: int | None = None):
         super().__init__(board, strategy, seed)
-        self.root = BayesianMctsNode(board, strategy, None, None, self.random_state)
+        self.root = BayesianMctsNode(board, strategy, color, None, None, self.random_state)
         self.root.visits += 1
+        self.color = color

     def sample(self, runs: int = 1000) -> None:
         for i in range(runs):
@@ -122,10 +143,10 @@ class BayesianMcts(IMcts):
             leaf_node = self.root.select().expand()
             _ = leaf_node.rollout()
             leaf_node.backpropagate()
-        #self.root.print()

     def apply_move(self, move: chess.Move) -> None:
         self.board.push(move)
+        self.color = not self.color

         # if a child node contains the move, set this child as new root
         for child in self.get_children():
@@ -135,11 +156,17 @@ class BayesianMcts(IMcts):
                 return

         # if no child node contains the move, initialize a new tree.
-        self.root = BayesianMctsNode(self.board, self.root.strategy, None, None, self.random_state)
+        self.root = BayesianMctsNode(self.board, self.root.strategy, self.color, None, None, self.random_state)

     def get_children(self) -> list[IMctsNode]:
         return self.root.children

+    def get_moves(self) -> Dict[chess.Move, int]:
+        res = {}
+        for c in self.root.children:
+            res[c.move] = c.mu
+        return res
+
     def print(self):
         print("================================")
         self.root.print()
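The gist of the changes in this file: node statistics are now combined relative to the side to move. WHITE nodes maximize and BLACK nodes minimize, both while descending the tree (the UCB1 comparison in _select_best_child) and while backing values up (_combine_gaussians dispatching to max_gaussian or min_gaussian). A standalone sketch of the selection rule follows; ucb1 here is a placeholder for gaussian_ucb1, whose formula is not part of this diff.

import math
import random
from dataclasses import dataclass

import chess


@dataclass
class Child:
    mu: float
    sigma: float
    visits: int


def ucb1(mu: float, sigma: float, parent_visits: int) -> float:
    # Placeholder: mean plus an optimism bonus scaled by sigma.
    # The project's gaussian_ucb1 may use a different formula.
    return mu + sigma * math.sqrt(2 * math.log(parent_visits + 1))


def select_best_child(children: list[Child], color: chess.Color,
                      parent_visits: int, rng: random.Random) -> Child:
    best = rng.choice(children)
    best_score = ucb1(best.mu, best.sigma, parent_visits)
    for child in children:
        if child.visits == 0:
            return child  # explore unvisited children first
        score = ucb1(child.mu, child.sigma, parent_visits)
        better = score > best_score if color == chess.WHITE else score < best_score
        if better:
            best, best_score = child, score
    return best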


@@ -140,14 +140,16 @@ def check_endgame(board: chess.Board) -> bool:

 def score_manual(board: chess.Board) -> int:
     """
-    Calculate the score of the given board regarding the given color
+    Calculate the score of a given board.
+    Positive scores indicate an advantage for WHITE, negative scores indicate an advantage for BLACK.
+    The range of scores is approx. -1,100,000 to 1,100,000.
     :param board: the chess board
-    :return: score metric
+    :return: score
     """
     outcome = board.outcome()
     if outcome is not None:
         if outcome.termination == chess.Termination.CHECKMATE:
-            return sys.maxsize if outcome.winner == chess.WHITE else -sys.maxsize
+            return 1_100_000 if outcome.winner == chess.WHITE else -1_100_000
         else:  # draw
             return 0
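Replacing sys.maxsize with a bounded checkmate value keeps terminal results on the same finite scale as the material heuristic. This matters because the rollout divides the score by the number of plies played (score = score_manual(copied_board) // steps, with steps starting at the node's depth). A small arithmetic illustration with a hypothetical depth:

import sys

CHECKMATE = 1_100_000

# Mate reached after 5 rollout plies from a node at depth 2 -> steps = 7.
steps = 7
print(CHECKMATE // steps)    # 157142: large, but still comparable to heuristic scores
print(sys.maxsize // steps)  # 1317624576693539401: would drown out every other signal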


@@ -10,11 +10,12 @@ CDF: dict[float, float] = {}
 lookup_count = 0

-def max_gaussian_numeric(mu1, sigma1, mu2, sigma2) -> (float, float):
-    pass
+def get_lookup_count():
+    global lookup_count
+    return lookup_count


-def max_gaussian(mu1, sigma1, mu2, sigma2) -> (float, float):
+def max_gaussian(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
     global lookup_count
     global F1
     global F2
@@ -26,46 +27,69 @@ def max_gaussian(mu1, sigma1, mu2, sigma2) -> (float, float):
     :param sigma1: sigma of the first Gaussian
     :param mu2: mu of the second Gaussian
     :param sigma2: sigma of the second Gaussian
+    :return: mu and sigma maximized
     """
     # we assume independence of the two gaussians
+    #print(mu1, sigma1, mu2, sigma2)
+    normal = dist.Normal(0, 1)
+    sigma_m = math.sqrt(sigma1 ** 2 + sigma2 ** 2)
+    alpha = (mu1 - mu2) / sigma_m
+    if alpha in CDF:
+        cdf_alpha = CDF[alpha]
+        lookup_count += 1
+    else:
+        cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
+        CDF[alpha] = cdf_alpha
+    pdf_alpha = exp(normal.log_prob(torch.tensor(alpha))).item()
+    if alpha in F1:
+        f1_alpha = F1[alpha]
+        lookup_count += 1
+    else:
+        f1_alpha = alpha * cdf_alpha + pdf_alpha
+        F1[alpha] = f1_alpha
+    if alpha in F2:
+        f2_alpha = F2[alpha]
+        lookup_count += 1
+    else:
+        f2_alpha = alpha ** 2 * cdf_alpha * (1 - cdf_alpha) + (
+                1 - 2 * cdf_alpha) * alpha * pdf_alpha - pdf_alpha ** 2
+        F2[alpha] = f2_alpha
+    mu = mu2 + sigma_m * f1_alpha
+    sigma = math.sqrt(sigma2 ** 2 + (sigma1 ** 2 - sigma2 ** 2) * cdf_alpha + sigma_m ** 2 * f2_alpha)
+    #sigma = math.sqrt((mu1**2 + sigma1**2) * cdf_alpha + (mu2**2 + sigma2**2) * (1 - cdf_alpha) + (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
+    return mu, sigma
+
+
+def min_gaussian(mu1, sigma1, mu2, sigma2) -> tuple[float, float]:
+    """
+    Returns the combined min gaussian of two Gaussians represented by mu1, sigma1, mu2, sigma2
+    :param mu1: mu of the first Gaussian
+    :param sigma1: sigma of the first Gaussian
+    :param mu2: mu of the second Gaussian
+    :param sigma2: sigma of the second Gaussian
+    :return: mu and sigma minimized
+    """
     try:
-        #print(mu1, sigma1, mu2, sigma2)
         normal = dist.Normal(0, 1)
         sigma_m = math.sqrt(sigma1 ** 2 + sigma2 ** 2)
         alpha = (mu1 - mu2) / sigma_m
-        if alpha in CDF:
-            cdf_alpha = CDF[alpha]
-            lookup_count += 1
-        else:
-            cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
-            CDF[alpha] = cdf_alpha
+        cdf_alpha = normal.cdf(torch.tensor(alpha)).item()
         pdf_alpha = exp(normal.log_prob(torch.tensor(alpha))).item()
-        if alpha in F1:
-            f1_alpha = F1[alpha]
-            lookup_count += 1
-        else:
-            f1_alpha = alpha * cdf_alpha + pdf_alpha
-            F1[alpha] = f1_alpha
-        if alpha in F2:
-            f2_alpha = F2[alpha]
-            lookup_count += 1
-        else:
-            f2_alpha = alpha ** 2 * cdf_alpha * (1 - cdf_alpha) + (
-                1 - 2 * cdf_alpha) * alpha * pdf_alpha - pdf_alpha ** 2
-            F2[alpha] = f2_alpha
-        mu = mu2 + sigma_m * f1_alpha
-        #sigma_old = sigma2 ** 2 + (sigma1 ** 2 - sigma2 ** 2) * cdf_alpha + sigma_m ** 2 * f2_alpha
-        sigma = math.sqrt((mu1**2 + sigma1**2) * cdf_alpha + (mu2**2 + sigma2**2) * (1 - cdf_alpha) + (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
+        pdf_alpha_neg = exp(normal.log_prob(torch.tensor(-alpha))).item()
+        mu = mu1 * (1 - cdf_alpha) + mu2 * cdf_alpha - pdf_alpha_neg * sigma_m
+        sigma = math.sqrt((mu1**2 + sigma1**2) * (1 - cdf_alpha) + (mu2**2 + sigma2**2) * cdf_alpha - (mu1 + mu2) * sigma_m * pdf_alpha - mu**2)
         return mu, sigma
     except ValueError:
         print(mu1, sigma1, mu2, sigma2)
+        exit(1)


 def beta_mean(alpha, beta):
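For reference, max_gaussian and min_gaussian are moment-matching approximations (Clark's method) of the maximum and minimum of two independent Gaussians X1 ~ N(mu1, sigma1^2) and X2 ~ N(mu2, sigma2^2). In LaTeX, with Phi and phi the standard normal CDF and PDF, the formulas the code implements are:

\sigma_m = \sqrt{\sigma_1^2 + \sigma_2^2}, \qquad
\alpha = \frac{\mu_1 - \mu_2}{\sigma_m}

\mu_{\max} = \mu_1 \Phi(\alpha) + \mu_2 \Phi(-\alpha) + \sigma_m \varphi(\alpha)
           = \mu_2 + \sigma_m \bigl( \alpha \Phi(\alpha) + \varphi(\alpha) \bigr)

\sigma_{\max}^2 = (\mu_1^2 + \sigma_1^2)\,\Phi(\alpha) + (\mu_2^2 + \sigma_2^2)\,\Phi(-\alpha)
                + (\mu_1 + \mu_2)\,\sigma_m \varphi(\alpha) - \mu_{\max}^2

\mu_{\min} = \mu_1 \Phi(-\alpha) + \mu_2 \Phi(\alpha) - \sigma_m \varphi(\alpha)

\sigma_{\min}^2 = (\mu_1^2 + \sigma_1^2)\,\Phi(-\alpha) + (\mu_2^2 + \sigma_2^2)\,\Phi(\alpha)
                - (\mu_1 + \mu_2)\,\sigma_m \varphi(\alpha) - \mu_{\min}^2

max_gaussian evaluates the variance through the algebraically equivalent f1/f2 form (with cached Phi lookups), with the direct second-moment form kept as the commented-out sigma line; min_gaussian uses the direct form with Phi(alpha) and Phi(-alpha) swapped and the sign of the phi term flipped.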