diff --git a/src/gomoku.cpp b/src/gomoku.cpp
index 4668285..a413f91 100644
--- a/src/gomoku.cpp
+++ b/src/gomoku.cpp
@@ -57,7 +57,7 @@ bool Gomoku::set_rule(unsigned int rule_flag)
 
 bool Gomoku::is_illegal(unsigned int x, unsigned int y)
 {
-    return x > this->n - 1 || y > this->n - 1 || this->board[x][y] != 0;
+    return (x > this->n - 1 || y > this->n - 1 || this->board[x][y] != 0);
 }
 
 std::vector Gomoku::get_legal_moves()
diff --git a/src/onnx.cpp b/src/onnx.cpp
index 552dddd..989aeff 100644
--- a/src/onnx.cpp
+++ b/src/onnx.cpp
@@ -200,7 +200,7 @@ NeuralNetwork::~NeuralNetwork()
     this->shared_session.reset();
 }
 
-std::future NeuralNetwork::commit(Gomoku *gomoku)
+std::future NeuralNetwork::commit(const Gomoku *gomoku)
 {
     std::vector state = transorm_gomoku_to_Tensor(gomoku);
 
@@ -282,7 +282,7 @@ std::vector NeuralNetwork::transorm_board_to_Tensor(const board_type &boa
     // return cat({ state0, state1, state2 }, 1);
 }
 
-std::vector NeuralNetwork::transorm_gomoku_to_Tensor(Gomoku *gomoku)
+std::vector NeuralNetwork::transorm_gomoku_to_Tensor(const Gomoku *gomoku)
 {
     return NeuralNetwork::transorm_board_to_Tensor(gomoku->get_board(), gomoku->get_last_move(), gomoku->get_current_color());
 }
diff --git a/src/onnx.h b/src/onnx.h
index 1c5ebb2..7a1cbc4 100644
--- a/src/onnx.h
+++ b/src/onnx.h
@@ -52,9 +52,9 @@ class NeuralNetwork
     // void save_weights(std::string model_path);
     ~NeuralNetwork();
 
-    std::future commit(Gomoku *gomoku); // commit task to queue
+    std::future commit(const Gomoku *gomoku); // commit task to queue
     // std::shared_ptr module; // torch module origin:private
-    static std::vector transorm_gomoku_to_Tensor(Gomoku *gomoku);
+    static std::vector transorm_gomoku_to_Tensor(const Gomoku *gomoku);
     static std::vector transorm_board_to_Tensor(const board_type &board, int last_move, int cur_player);
 
     bool set_batch_size(unsigned int u_batch_size);
diff --git a/src/play.cpp b/src/play.cpp
index 6d0186e..a752878 100644
--- a/src/play.cpp
+++ b/src/play.cpp
@@ -62,7 +62,7 @@ void SelfPlay::play(unsigned int saved_id)
     while (game_state.first == 0)
     {
         // std::cout << "game id: " << saved_id << std::endl;
-        double temp = step < EXPLORE_STEP ? 1.0 : 1e-3;
+        double temp = (step < EXPLORE_STEP) ? 1.0 : 1e-3;
         auto action_probs = mcts->get_action_probs(g.get(), temp);
         // auto action_probs = m->get_action_probs(g.get(), 1);
         // int best_action = m->get_best_action_from_prob(action_probs);
diff --git a/src/train_and_eval/train_eval_net.cpp b/src/train_and_eval/train_eval_net.cpp
index ca664c9..970c0e3 100644
--- a/src/train_and_eval/train_eval_net.cpp
+++ b/src/train_and_eval/train_eval_net.cpp
@@ -57,7 +57,7 @@ void play_for_eval(NeuralNetwork *a, NeuralNetwork *b, bool a_first, int *win_ta
     // std::cout << episode << " th game!!" << std::endl;
     while (game_state.first == 0)
     {
-        int res = (step + a_first) % 2 ? ma.get_best_action(g.get()) : mb.get_best_action(g.get());
+        int res = ((step + a_first) % 2) ? ma.get_best_action(g.get()) : mb.get_best_action(g.get());
         ma.update_with_move(res);
         mb.update_with_move(res);
         g->execute_move(res);
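Note on the const-correctness hunks above (a minimal standalone sketch, not part of the patch): tightening commit() and transorm_gomoku_to_Tensor() to take const Gomoku * only compiles if the Gomoku accessors they call are themselves const-qualified. The stub class and encode() function below are illustrative stand-ins assuming accessors shaped like those named in the diff; the main() also demonstrates why (step + a_first) % 2 in play_for_eval() alternates sides correctly, so the parentheses added there change readability only, not behavior.

#include <iostream>
#include <vector>

// Illustrative stub, assuming accessors shaped like the real Gomoku class.
class Gomoku
{
public:
    // These must be const-qualified, or the const Gomoku * signatures
    // introduced by the patch would fail to compile.
    const std::vector<std::vector<int>> &get_board() const { return board; }
    int get_last_move() const { return last_move; }
    int get_current_color() const { return current_color; }

private:
    std::vector<std::vector<int>> board =
        std::vector<std::vector<int>>(15, std::vector<int>(15, 0));
    int last_move = -1;
    int current_color = 1;
};

// Read-only consumer: taking const Gomoku * documents that encoding a
// position never mutates the game state, and the compiler enforces it.
std::vector<float> encode(const Gomoku *gomoku)
{
    std::vector<float> state;
    for (const auto &row : gomoku->get_board())
        for (int cell : row)
            state.push_back(static_cast<float>(cell));
    return state;
}

int main()
{
    Gomoku g;
    std::cout << "encoded size: " << encode(&g).size() << std::endl;

    // bool promotes to 0/1 in integer arithmetic, so (step + a_first) % 2
    // flips between the two players on every step; the parentheses added
    // in play_for_eval() only make the precedence explicit.
    bool a_first = true;
    for (int step = 0; step < 4; ++step)
        std::cout << "step " << step << " -> "
                  << (((step + a_first) % 2) ? "a moves" : "b moves")
                  << std::endl;
    return 0;
}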