@@ -48,8 +48,9 @@ static bool is_interacting = false;
 
 void write_logfile(
     const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
-
+    const std::vector<llama_token> & input_tokens, const std::string & output,
+    const std::vector<llama_token> & output_tokens
+) {
     if (params.logdir.empty()) {
         return;
     }
@@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
     g_params = &params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
@@ -303,7 +304,7 @@ int main(int argc, char ** argv) {
 
     // debug message about similarity of saved session, if applicable
     size_t n_matching_session_tokens = 0;
-    if (session_tokens.size() > 0) {
+    if (!session_tokens.empty()) {
         for (llama_token id : session_tokens) {
             if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                 break;
@@ -401,7 +402,7 @@ int main(int argc, char ** argv) {
 
         LOG_TEE("%s: interactive mode on.\n", __func__);
 
-        if (params.antiprompt.size()) {
+        if (!params.antiprompt.empty()) {
             for (const auto & antiprompt : params.antiprompt) {
                 LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
             }
@@ -499,7 +500,7 @@ int main(int argc, char ** argv) {
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
-        if (embd.size() > 0) {
+        if (!embd.empty()) {
             // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
             // --prompt or --file which uses the same value.
             int max_embd_size = n_ctx - 4;
@@ -624,7 +625,7 @@ int main(int argc, char ** argv) {
                 LOG("n_past = %d\n", n_past);
             }
 
-            if (embd.size() > 0 && !path_session.empty()) {
+            if (!embd.empty() && !path_session.empty()) {
                 session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                 n_session_consumed = session_tokens.size();
             }
@@ -695,7 +696,7 @@ int main(int argc, char ** argv) {
         // if not currently processing queued inputs;
         if ((int) embd_inp.size() <= n_consumed) {
             // check for reverse prompt
-            if (params.antiprompt.size()) {
+            if (!params.antiprompt.empty()) {
                 std::string last_output;
                 for (auto id : last_tokens) {
                     last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +733,7 @@ int main(int argc, char ** argv) {
                 LOG("found EOS token\n");
 
                 if (params.interactive) {
-                    if (params.antiprompt.size() != 0) {
+                    if (!params.antiprompt.empty()) {
                         // tokenize and inject first reverse prompt
                         const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                         embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());