Skip to content

Commit 00d62ad

Browse files
cebtenzzre and xaedes authored
fix some warnings from gcc and clang-tidy (ggml-org#3038)
Co-authored-by: xaedes <[email protected]>
1 parent 4fa2cc1 commit 00d62ad

22 files changed

+63
-101
lines changed

.clang-tidy

+5
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ Checks: >
33
bugprone-*,
44
-bugprone-easily-swappable-parameters,
55
-bugprone-implicit-widening-of-multiplication-result,
6+
-bugprone-misplaced-widening-cast,
67
-bugprone-narrowing-conversions,
78
readability-*,
89
-readability-avoid-unconditional-preprocessor-if,
@@ -15,4 +16,8 @@ Checks: >
1516
-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
1617
performance-*,
1718
portability-*,
19+
misc-*,
20+
-misc-const-correctness,
21+
-misc-non-private-member-variables-in-classes,
22+
-misc-no-recursion,
1823
FormatStyle: none

CMakeLists.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@ if (LLAMA_ALL_WARNINGS)
426426
)
427427
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
428428
# g++ only
429-
set(cxx_flags ${cxx_flags} -Wno-format-truncation)
429+
set(cxx_flags ${cxx_flags} -Wno-format-truncation -Wno-array-bounds)
430430
endif()
431431
else()
432432
# todo : msvc

Makefile

+1-1
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ MK_CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-m
134134

135135
ifeq '' '$(findstring clang++,$(CXX))'
136136
# g++ only
137-
MK_CXXFLAGS += -Wno-format-truncation
137+
MK_CXXFLAGS += -Wno-format-truncation -Wno-array-bounds
138138
endif
139139

140140
# OS specific

common/common.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ int32_t get_num_physical_cores() {
5757
siblings.insert(line);
5858
}
5959
}
60-
if (siblings.size() > 0) {
60+
if (!siblings.empty()) {
6161
return static_cast<int32_t>(siblings.size());
6262
}
6363
#elif defined(__APPLE__) && defined(__MACH__)

common/common.h

+3
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@
2020
#define DIRECTORY_SEPARATOR '/'
2121
#endif // _WIN32
2222

23+
#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0)
24+
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); exit(1); } while (0)
25+
2326
//
2427
// CLI argument parsing
2528
//

common/grammar-parser.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -415,6 +415,7 @@ namespace grammar_parser {
415415

416416
std::vector<const llama_grammar_element *> parse_state::c_rules() {
417417
std::vector<const llama_grammar_element *> ret;
418+
ret.reserve(rules.size());
418419
for (const auto & rule : rules) {
419420
ret.push_back(rule.data());
420421
}

examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
#include "ggml.h"
22
#include "llama.h"
3+
#include "common.h"
34

45
#include <unordered_map>
56
#include <vector>
@@ -499,10 +500,10 @@ struct llama_file {
499500
errno = 0;
500501
std::size_t ret = std::fread(ptr, size, 1, fp);
501502
if (ferror(fp)) {
502-
throw std::runtime_error(format("read error: %s", strerror(errno)));
503+
die_fmt("fread failed: %s", strerror(errno));
503504
}
504505
if (ret != 1) {
505-
throw std::runtime_error(std::string("unexpectedly reached end of file"));
506+
die("unexpectedly reached end of file");
506507
}
507508
}
508509

@@ -597,8 +598,7 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
597598
printf("Assuming llama2.c vocabulary since %s is not a gguf file\n", filename);
598599
llama_file file(filename, "rb");
599600
if (!file.fp) {
600-
fprintf(stderr, "error: %s: %s\n", strerror(errno), filename);
601-
exit(1);
601+
die_fmt("%s: %s", strerror(errno), filename);
602602
}
603603
const int n_vocab = config->vocab_size;
604604
/* uint32_t max_token_length = */ file.read_u32(); // unused

examples/embd-input/embd-input-lib.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ extern "C" {
2323
struct MyModel* create_mymodel(int argc, char ** argv) {
2424
gpt_params params;
2525

26-
if (gpt_params_parse(argc, argv, params) == false) {
26+
if (!gpt_params_parse(argc, argv, params)) {
2727
return nullptr;
2828
}
2929

examples/embedding/embedding.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
int main(int argc, char ** argv) {
1212
gpt_params params;
1313

14-
if (gpt_params_parse(argc, argv, params) == false) {
14+
if (!gpt_params_parse(argc, argv, params)) {
1515
return 1;
1616
}
1717

examples/gptneox-wip/falcon-main.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -953,7 +953,7 @@ int main(int argc, char ** argv) {
953953

954954
gpt_params params;
955955

956-
if (gpt_params_parse(argc, argv, params) == false) {
956+
if (!gpt_params_parse(argc, argv, params)) {
957957
return 1;
958958
}
959959

examples/gptneox-wip/gptneox-main.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -925,7 +925,7 @@ int main(int argc, char ** argv) {
925925

926926
gpt_params params;
927927

928-
if (gpt_params_parse(argc, argv, params) == false) {
928+
if (!gpt_params_parse(argc, argv, params)) {
929929
return 1;
930930
}
931931

examples/main/main.cpp

+10-9
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,9 @@ static bool is_interacting = false;
4848

4949
void write_logfile(
5050
const llama_context * ctx, const gpt_params & params, const llama_model * model,
51-
const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
52-
51+
const std::vector<llama_token> & input_tokens, const std::string & output,
52+
const std::vector<llama_token> & output_tokens
53+
) {
5354
if (params.logdir.empty()) {
5455
return;
5556
}
@@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
109110
gpt_params params;
110111
g_params = &params;
111112

112-
if (gpt_params_parse(argc, argv, params) == false) {
113+
if (!gpt_params_parse(argc, argv, params)) {
113114
return 1;
114115
}
115116

@@ -303,7 +304,7 @@ int main(int argc, char ** argv) {
303304

304305
// debug message about similarity of saved session, if applicable
305306
size_t n_matching_session_tokens = 0;
306-
if (session_tokens.size() > 0) {
307+
if (!session_tokens.empty()) {
307308
for (llama_token id : session_tokens) {
308309
if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
309310
break;
@@ -401,7 +402,7 @@ int main(int argc, char ** argv) {
401402

402403
LOG_TEE("%s: interactive mode on.\n", __func__);
403404

404-
if (params.antiprompt.size()) {
405+
if (!params.antiprompt.empty()) {
405406
for (const auto & antiprompt : params.antiprompt) {
406407
LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
407408
}
@@ -499,7 +500,7 @@ int main(int argc, char ** argv) {
499500

500501
while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
501502
// predict
502-
if (embd.size() > 0) {
503+
if (!embd.empty()) {
503504
// Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
504505
// --prompt or --file which uses the same value.
505506
int max_embd_size = n_ctx - 4;
@@ -624,7 +625,7 @@ int main(int argc, char ** argv) {
624625
LOG("n_past = %d\n", n_past);
625626
}
626627

627-
if (embd.size() > 0 && !path_session.empty()) {
628+
if (!embd.empty() && !path_session.empty()) {
628629
session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
629630
n_session_consumed = session_tokens.size();
630631
}
@@ -695,7 +696,7 @@ int main(int argc, char ** argv) {
695696
// if not currently processing queued inputs;
696697
if ((int) embd_inp.size() <= n_consumed) {
697698
// check for reverse prompt
698-
if (params.antiprompt.size()) {
699+
if (!params.antiprompt.empty()) {
699700
std::string last_output;
700701
for (auto id : last_tokens) {
701702
last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +733,7 @@ int main(int argc, char ** argv) {
732733
LOG("found EOS token\n");
733734

734735
if (params.interactive) {
735-
if (params.antiprompt.size() != 0) {
736+
if (!params.antiprompt.empty()) {
736737
// tokenize and inject first reverse prompt
737738
const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
738739
embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());

examples/perplexity/perplexity.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -655,7 +655,7 @@ int main(int argc, char ** argv) {
655655
gpt_params params;
656656

657657
params.n_batch = 512;
658-
if (gpt_params_parse(argc, argv, params) == false) {
658+
if (!gpt_params_parse(argc, argv, params)) {
659659
return 1;
660660
}
661661

examples/quantize-stats/quantize-stats.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ void quantize_stats_print_usage(int /*argc*/, char ** argv) {
7171
}
7272

7373
// Check if a layer is included/excluded by command line
74-
bool layer_included(const quantize_stats_params params, const std::string & layer) {
74+
bool layer_included(const quantize_stats_params & params, const std::string & layer) {
7575
for (const auto& excluded : params.exclude_layers) {
7676
if (std::regex_search(layer, std::regex(excluded))) {
7777
return false;

examples/quantize/quantize.cpp

+3-4
Original file line numberDiff line numberDiff line change
@@ -143,10 +143,9 @@ int main(int argc, char ** argv) {
143143
if (!try_parse_ftype(argv[arg_idx], params.ftype, ftype_str)) {
144144
fprintf(stderr, "%s: invalid ftype '%s'\n", __func__, argv[3]);
145145
return 1;
146-
} else {
147-
if (ftype_str == "COPY") {
148-
params.only_copy = true;
149-
}
146+
}
147+
if (ftype_str == "COPY") {
148+
params.only_copy = true;
150149
}
151150
arg_idx++;
152151
}

examples/save-load-state/save-load-state.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ int main(int argc, char ** argv) {
1313
params.repeat_last_n = 64;
1414
params.prompt = "The quick brown fox";
1515

16-
if (gpt_params_parse(argc, argv, params) == false) {
16+
if (!gpt_params_parse(argc, argv, params)) {
1717
return 1;
1818
}
1919

@@ -44,7 +44,7 @@ int main(int argc, char ** argv) {
4444
llama_free_model(model);
4545
return 1;
4646
}
47-
auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
47+
auto tokens = llama_tokenize(ctx, params.prompt, true);
4848
auto n_prompt_tokens = tokens.size();
4949
if (n_prompt_tokens < 1) {
5050
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);

examples/server/server.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ static std::string tokens_to_output_formatted_string(const llama_context *ctx, c
139139
}
140140

141141
// convert a vector of completion_token_output to json
142-
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> probs)
142+
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> & probs)
143143
{
144144
json out = json::array();
145145
for (const auto &prob : probs)
@@ -271,7 +271,7 @@ struct llama_server_context
271271
return true;
272272
}
273273

274-
std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
274+
std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const
275275
{
276276
// If `add_bos` is true, we only add BOS, when json_prompt is a string,
277277
// or the first element of the json_prompt array is a string.
@@ -611,7 +611,7 @@ struct llama_server_context
611611

612612
completion_token_output doCompletion()
613613
{
614-
const completion_token_output token_with_probs = nextToken();
614+
auto token_with_probs = nextToken();
615615

616616
const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_piece(ctx, token_with_probs.tok);
617617
generated_text += token_text;
@@ -1255,7 +1255,7 @@ void beam_search_callback(void * callback_data, llama_beams_state beams_state) {
12551255
struct token_translator {
12561256
llama_context * ctx;
12571257
std::string operator()(llama_token tok) const { return llama_token_to_piece(ctx, tok); }
1258-
std::string operator()(completion_token_output cto) const { return (*this)(cto.tok); }
1258+
std::string operator()(const completion_token_output & cto) const { return (*this)(cto.tok); }
12591259
};
12601260

12611261
void append_to_generated_text_from_generated_token_probs(llama_server_context & llama) {

0 commit comments

Comments (0)