llama-mtmd-cli: Sigint rework in mtmd vision example #13080

Merged · 5 commits · Apr 23, 2025
examples/llava/mtmd-cli.cpp (31 changes: 24 additions & 7 deletions)
@@ -24,7 +24,9 @@
 #include <signal.h>
 #endif

-static bool g_is_generating = false;
+// volatile, because of signal being an interrupt
+static volatile bool g_is_generating = false;
+static volatile bool g_is_interrupted = false;

 /**
  * Please note that this is NOT a production-ready stuff.
@@ -50,8 +52,10 @@ static void sigint_handler(int signo) {
             g_is_generating = false;
         } else {
             console::cleanup();
-            LOG("\nInterrupted by user\n");
-            _exit(130);
+            if (g_is_interrupted) {
+                _exit(1);
+            }
+            g_is_interrupted = true;
         }
     }
 }
@@ -167,7 +171,7 @@ struct decode_embd_batch {
 static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) {
     llama_tokens generated_tokens;
     for (int i = 0; i < n_predict; i++) {
-        if (i > n_predict || !g_is_generating) {
+        if (i > n_predict || !g_is_generating || g_is_interrupted) {
             printf("\n");
             break;
         }
@@ -184,6 +188,11 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int
         printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
         fflush(stdout);

+        if (g_is_interrupted) {
+            printf("\n");
+            break;
+        }
+
         // eval the token
         common_batch_clear(ctx.batch);
         common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
@@ -219,6 +228,9 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vect
     text.add_special = add_bos;
     text.parse_special = true;
     mtmd_input_chunks chunks;
+
+    if (g_is_interrupted) return 0;
+
     int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps);
     if (res != 0) {
         LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
@@ -276,6 +288,8 @@ int main(int argc, char ** argv) {
 #endif
     }

+    if (g_is_interrupted) return 130;
+
     if (is_single_turn) {
         g_is_generating = true;
         if (params.prompt.find("<__image__>") == std::string::npos) {
@@ -287,7 +301,7 @@
         if (eval_message(ctx, msg, params.image, true)) {
             return 1;
         }
-        if (generate_response(ctx, smpl, n_predict)) {
+        if (!g_is_interrupted && generate_response(ctx, smpl, n_predict)) {
             return 1;
         }

@@ -302,12 +316,13 @@ int main(int argc, char ** argv) {
         std::vector<std::string> images_fname;
         std::string content;

-        while (true) {
+        while (!g_is_interrupted) {
             g_is_generating = false;
             LOG("\n> ");
             console::set_display(console::user_input);
             std::string line;
             console::readline(line, false);
+            if (g_is_interrupted) break;
             console::set_display(console::reset);
             line = string_strip(line);
             if (line.empty()) {
Expand Down Expand Up @@ -335,6 +350,7 @@ int main(int argc, char ** argv) {
msg.role = "user";
msg.content = content;
int ret = eval_message(ctx, msg, images_fname, is_first_msg);
if (g_is_interrupted) break;
if (ret == 2) {
// non-fatal error
images_fname.clear();
@@ -352,6 +368,7 @@ int main(int argc, char ** argv) {
             is_first_msg = false;
         }
     }
+    if (g_is_interrupted) LOG("\nInterrupted by user\n");
     llama_perf_context_print(ctx.lctx);
-    return 0;
+    return g_is_interrupted ? 130 : 0;
 }
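
For reference, the shutdown logic this patch implements reads as a small state machine: SIGINT while a response is streaming only stops generation (clears g_is_generating), SIGINT while idle requests a clean exit (sets g_is_interrupted, which the main loop honors and reports as exit code 130, i.e. 128 + SIGINT), and SIGINT while an exit is already pending force-quits with _exit(1). Below is a minimal, self-contained sketch of that two-flag pattern, not the PR's code: the sleep()-based loops are stand-ins for token decoding and console::readline(), console::cleanup() is omitted, and like the original (which guards this under #if defined(__unix__)) it is POSIX-only.

// Sketch of the two-flag SIGINT pattern from this PR, with llama.cpp
// internals replaced by stand-ins.
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

// volatile, because the handler runs asynchronously to the main loop
static volatile bool g_is_generating  = false;
static volatile bool g_is_interrupted = false;

static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (g_is_generating) {
            g_is_generating = false;  // streaming: just stop this response
        } else {
            if (g_is_interrupted) {
                _exit(1);             // exit already pending: force-quit
            }
            g_is_interrupted = true;  // idle: request a clean shutdown
        }
    }
}

int main() {
    struct sigaction sa = {};
    sa.sa_handler = sigint_handler;
    sigaction(SIGINT, &sa, NULL);

    while (!g_is_interrupted) {       // interactive loop, as in mtmd-cli
        g_is_generating = true;
        for (int i = 0; i < 10 && g_is_generating && !g_is_interrupted; i++) {
            printf(".");              // stand-in for printing one token
            fflush(stdout);
            sleep(1);
        }
        g_is_generating = false;
        if (!g_is_interrupted) {
            sleep(2);                 // stand-in for console::readline()
        }
    }
    printf("\nInterrupted by user\n");
    return 130;                       // 128 + SIGINT, matching the PR
}

One portability note: the strictly conforming type for flags touched inside a signal handler is volatile sig_atomic_t; the PR's volatile bool follows the pattern already in the example and works on the platforms it targets.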