ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4060 Ti, compute capability 8.9, VMM: yes
build: 6912 (961660b8c) with cc (Ubuntu 11.4.0-1ubuntu1~22.04.2) 11.4.0 for x86_64-linux-gnu
system info: n_threads = 6, n_threads_batch = 6, total_threads = 16

system_info: n_threads = 6 (n_threads_batch = 6) / 16 | CUDA : ARCHS = 890 | USE_GRAPHS = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | LLAMAFILE = 1 | OPENMP = 1 | REPACK = 1 |

main: binding port with default address family
main: HTTP server is listening, hostname: 0.0.0.0, port: 8081, http threads: 15
main: loading model
srv load_model: loading model '/home/huangfukk/models/gguf/Qwen3/Qwen3-4B/Qwen3-4B-Q5_K_M.gguf'
llama_model_load_from_file_impl: using device CUDA0 (NVIDIA GeForce RTX 4060 Ti) (0000:01:00.0) - 13681 MiB free
llama_model_loader: loaded meta data with 28 key-value pairs and 398 tensors from /home/huangfukk/models/gguf/Qwen3/Qwen3-4B/Qwen3-4B-Q5_K_M.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen3
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Qwen3 4B Instruct
llama_model_loader: - kv 3: general.finetune str = Instruct
llama_model_loader: - kv 4: general.basename str = Qwen3
llama_model_loader: - kv 5: general.size_label str = 4B
llama_model_loader: - kv 6: qwen3.block_count u32 = 36
llama_model_loader: - kv 7: qwen3.context_length u32 = 40960
llama_model_loader: - kv 8: qwen3.embedding_length u32 = 2560
llama_model_loader: - kv 9: qwen3.feed_forward_length u32 = 9728
llama_model_loader: - kv 10: qwen3.attention.head_count u32 = 32
llama_model_loader: - kv 11: qwen3.attention.head_count_kv u32 = 8
llama_model_loader: - kv 12: qwen3.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 13: qwen3.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 14: qwen3.attention.key_length u32 = 128
llama_model_loader: - kv 15: qwen3.attention.value_length u32 = 128
llama_model_loader: - kv 16: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 17: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 18: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 19: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 20: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 21: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 22: tokenizer.ggml.padding_token_id u32 = 151643
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 24: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 25: tokenizer.chat_template str = {%- if tools %}\n {{- '<|im_start|>...
llama_model_loader: - kv 26: general.quantization_version u32 = 2
llama_model_loader: - kv 27: general.file_type u32 = 17
llama_model_loader: - type f32: 145 tensors
llama_model_loader: - type q5_K: 216 tensors
llama_model_loader: - type q6_K: 37 tensors
print_info: file format = GGUF V3 (latest)
print_info: file type = Q5_K - Medium
print_info: file size = 2.69 GiB (5.73 BPW)
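(Sanity check: the "5.73 BPW" above is just file bits divided by parameter count. A minimal Python sketch using only numbers printed in this log; the small mismatch comes from the 2.69 GiB figure being rounded:)

    file_bytes = 2.69 * 1024**3       # "file size = 2.69 GiB" above
    n_params   = 4.02e9               # "model params = 4.02 B" printed below
    print(file_bytes * 8 / n_params)  # ~5.75 bits per weight vs. the logged 5.73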
load: printing all EOG tokens:
load: - 151643 ('<|endoftext|>')
load: - 151645 ('<|im_end|>')
load: - 151662 ('<|fim_pad|>')
load: - 151663 ('<|repo_name|>')
load: - 151664 ('<|file_sep|>')
load: special tokens cache size = 26
load: token to piece cache size = 0.9311 MB
print_info: arch = qwen3
print_info: vocab_only = 0
print_info: n_ctx_train = 40960
print_info: n_embd = 2560
print_info: n_layer = 36
print_info: n_head = 32
print_info: n_head_kv = 8
print_info: n_rot = 128
print_info: n_swa = 0
print_info: is_swa_any = 0
print_info: n_embd_head_k = 128
print_info: n_embd_head_v = 128
print_info: n_gqa = 4
print_info: n_embd_k_gqa = 1024
print_info: n_embd_v_gqa = 1024
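(The three derived values just above follow from the head counts earlier in the dump, assuming the usual grouped-query-attention bookkeeping; a sketch:)

    n_head, n_head_kv, n_embd_head_k = 32, 8, 128
    n_gqa        = n_head // n_head_kv        # 4 query heads share each KV head
    n_embd_k_gqa = n_head_kv * n_embd_head_k  # 1024, per-layer K width in the cache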
print_info: f_norm_eps = 0.0e+00
print_info: f_norm_rms_eps = 1.0e-06
print_info: f_clamp_kqv = 0.0e+00
print_info: f_max_alibi_bias = 0.0e+00
print_info: f_logit_scale = 0.0e+00
print_info: f_attn_scale = 0.0e+00
print_info: n_ff = 9728
print_info: n_expert = 0
print_info: n_expert_used = 0
print_info: n_expert_groups = 0
print_info: n_group_used = 0
print_info: causal attn = 1
print_info: pooling type = -1
print_info: rope type = 2
print_info: rope scaling = linear
print_info: freq_base_train = 1000000.0
print_info: freq_scale_train = 1
print_info: n_ctx_orig_yarn = 40960
print_info: rope_finetuned = unknown
print_info: model type = 4B
print_info: model params = 4.02 B
print_info: general.name = Qwen3 4B Instruct
print_info: vocab type = BPE
print_info: n_vocab = 151936
print_info: n_merges = 151387
print_info: BOS token = 151643 '<|endoftext|>'
print_info: EOS token = 151645 '<|im_end|>'
print_info: EOT token = 151645 '<|im_end|>'
print_info: PAD token = 151643 '<|endoftext|>'
print_info: LF token = 198 'Ċ'
print_info: FIM PRE token = 151659 '<|fim_prefix|>'
print_info: FIM SUF token = 151661 '<|fim_suffix|>'
print_info: FIM MID token = 151660 '<|fim_middle|>'
print_info: FIM PAD token = 151662 '<|fim_pad|>'
print_info: FIM REP token = 151663 '<|repo_name|>'
print_info: FIM SEP token = 151664 '<|file_sep|>'
print_info: EOG token = 151643 '<|endoftext|>'
print_info: EOG token = 151645 '<|im_end|>'
print_info: EOG token = 151662 '<|fim_pad|>'
print_info: EOG token = 151663 '<|repo_name|>'
print_info: EOG token = 151664 '<|file_sep|>'
print_info: max token length = 256
load_tensors: loading model tensors, this can take a while... (mmap = true)
load_tensors: offloading 36 repeating layers to GPU
load_tensors: offloaded 36/37 layers to GPU
load_tensors: CPU_Mapped model buffer size = 304.29 MiB
load_tensors: CUDA0 model buffer size = 2445.68 MiB
..........................................................................................
llama_context: constructing llama_context
llama_context: n_seq_max = 1
llama_context: n_ctx = 8192
llama_context: n_ctx_per_seq = 8192
llama_context: n_batch = 2048
llama_context: n_ubatch = 512
llama_context: causal_attn = 1
llama_context: flash_attn = auto
llama_context: kv_unified = false
llama_context: freq_base = 1000000.0
llama_context: freq_scale = 1
llama_context: n_ctx_per_seq (8192) < n_ctx_train (40960) -- the full capacity of the model will not be utilized
llama_context: CPU output buffer size = 0.58 MiB
llama_kv_cache: CUDA0 KV buffer size = 1152.00 MiB
llama_kv_cache: size = 1152.00 MiB ( 8192 cells, 36 layers, 1/1 seqs), K (f16): 576.00 MiB, V (f16): 576.00 MiB
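(The KV-cache size above decomposes exactly as printed; a quick Python check, assuming 2 bytes per f16 element:)

    cells, layers, width, f16_bytes = 8192, 36, 1024, 2   # from the line above
    k_mib = cells * layers * width * f16_bytes / 1024**2  # 576.0 MiB for K
    print(2 * k_mib)                                      # 1152.0 MiB for K + V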
llama_context: Flash Attention was auto, set to enabled
llama_context: CUDA0 compute buffer size = 606.03 MiB
llama_context: CUDA_Host compute buffer size = 21.01 MiB
llama_context: graph nodes = 1267
llama_context: graph splits = 4 (with bs=512), 3 (with bs=1)
common_init_from_params: added <|endoftext|> logit bias = -inf
common_init_from_params: added <|im_end|> logit bias = -inf
common_init_from_params: added <|fim_pad|> logit bias = -inf
common_init_from_params: added <|repo_name|> logit bias = -inf
common_init_from_params: added <|file_sep|> logit bias = -inf
common_init_from_params: setting dry_penalty_last_n to ctx_size = 8192
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
srv init: initializing slots, n_slots = 1
slot init: id 0 | task -1 | new slot n_ctx_slot = 8192
srv init: prompt cache is enabled, size limit: 8192 MiB
srv init: use `--cache-ram 0` to disable the prompt cache
srv init: for more info see https://github.com/ggml-org/llama.cpp/pull/16391
srv init: thinking = 0
main: model loaded
main: chat template, chat_template: {%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for index in range(ns.last_query_index, -1, -1) %}
{%- set message = messages[index] %}
{%- if ns.multi_step_tool and message.role == "user" and not('<tool_response>' in message.content and '</tool_response>' in message.content) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{%- set content = message.content %}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is defined and message.reasoning_content is not none %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in message.content %}
{%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
{%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %}, example_format: '<|im_start|>system
You are a helpful assistant<|im_end|>
<|im_start|>user
Hello<|im_end|>
<|im_start|>assistant
Hi there<|im_end|>
<|im_start|>user
How are you?<|im_end|>
<|im_start|>assistant
'
main: server is listening on http://0.0.0.0:8081 - starting the main loop
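(The POST /v1/chat/completions entries below come from llama-server's OpenAI-compatible endpoint; a stdlib-only Python sketch of such a request. The prompt text and max_tokens here are illustrative placeholders, not values recovered from this log:)

    import json, urllib.request

    req = urllib.request.Request(
        "http://127.0.0.1:8081/v1/chat/completions",
        data=json.dumps({
            "messages": [{"role": "user", "content": "Hello"}],
            "max_tokens": 64,
        }).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp)["choices"][0]["message"]["content"])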
srv update_slots: all slots are idle
srv update_slots: all slots are idle
srv log_server_r: request: GET /health 127.0.0.1 200
srv params_from_: Chat format: Content-only
slot get_availabl: id 0 | task -1 | selected slot by LRU, t_last = -1
slot launch_slot_: id 0 | task 0 | processing task
slot update_slots: id 0 | task 0 | new prompt, n_ctx_slot = 8192, n_keep = 0, task.n_tokens = 375
slot update_slots: id 0 | task 0 | n_tokens = 0, memory_seq_rm [0, end)
slot update_slots: id 0 | task 0 | prompt processing progress, n_tokens = 375, batch.n_tokens = 375, progress = 1.000000
slot update_slots: id 0 | task 0 | prompt done, n_tokens = 375, batch.n_tokens = 375
slot print_timing: id 0 | task 0 |
prompt eval time = 111.12 ms / 375 tokens ( 0.30 ms per token, 3374.64 tokens per second)
eval time = 207.35 ms / 10 tokens ( 20.73 ms per token, 48.23 tokens per second)
total time = 318.47 ms / 385 tokens
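(The throughput numbers in the timing block above are plain ratios of token count to wall time; reproduced in Python:)

    prompt_ms, prompt_n = 111.12, 375
    eval_ms,   eval_n   = 207.35, 10
    print(prompt_n / (prompt_ms / 1000))  # ~3374.6 prompt tokens per second
    print(eval_n / (eval_ms / 1000))      # ~48.2 generated tokens per second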
slot release: id 0 | task 0 | stop processing: n_tokens = 384, truncated = 0
srv update_slots: all slots are idle
srv log_server_r: request: POST /v1/chat/completions 127.0.0.1 200
srv params_from_: Chat format: Content-only
slot get_availabl: id 0 | task -1 | selected slot by LRU, t_last = 237472165130
srv get_availabl: updating prompt cache
srv prompt_save: - saving prompt with length 384, total state size = 54.005 MiB
srv load: - looking for better prompt, base f_keep = 0.008, sim = 0.001
srv update: - cache state: 1 prompts, 54.005 MiB (limits: 8192.000 MiB, 8192 tokens, 58248 est)
srv update: - prompt 0x63c2bba5e140: 384 tokens, checkpoints: 0, 54.005 MiB
srv get_availabl: prompt cache update took 22.38 ms
slot launch_slot_: id 0 | task 11 | processing task
slot update_slots: id 0 | task 11 | new prompt, n_ctx_slot = 8192, n_keep = 0, task.n_tokens = 2554
slot update_slots: id 0 | task 11 | n_tokens = 3, memory_seq_rm [3, end)
slot update_slots: id 0 | task 11 | prompt processing progress, n_tokens = 2051, batch.n_tokens = 2048, progress = 0.803054
slot update_slots: id 0 | task 11 | n_tokens = 2051, memory_seq_rm [2051, end)
slot update_slots: id 0 | task 11 | prompt processing progress, n_tokens = 2554, batch.n_tokens = 503, progress = 1.000000
slot update_slots: id 0 | task 11 | prompt done, n_tokens = 2554, batch.n_tokens = 503
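(The two "prompt processing progress" lines above show the 2554-token prompt split across batches: 3 tokens were reused from the cache, per "memory_seq_rm [3, end)", and each pass is capped by the n_batch = 2048 from the context setup earlier. A sketch of the presumed arithmetic:)

    task_n, cached, n_batch = 2554, 3, 2048
    first  = min(task_n - cached, n_batch)  # 2048 -> n_tokens = 2051, progress = 2051/2554 = 0.803054
    second = task_n - cached - first        # 503  -> n_tokens = 2554, progress = 1.000000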
slot print_timing: id 0 | task 11 |
prompt eval time = 608.76 ms / 2551 tokens ( 0.24 ms per token, 4190.45 tokens per second)
eval time = 9337.00 ms / 409 tokens ( 22.83 ms per token, 43.80 tokens per second)
total time = 9945.76 ms / 2960 tokens
slot release: id 0 | task 11 | stop processing: n_tokens = 2962, truncated = 0
srv update_slots: all slots are idle
srv log_server_r: request: POST /v1/chat/completions 127.0.0.1 200
srv params_from_: Chat format: Content-only
slot get_availabl: id 0 | task -1 | selected slot by LRU, t_last = 237482215959
srv get_availabl: updating prompt cache
srv prompt_save: - saving prompt with length 2962, total state size = 416.566 MiB
srv load: - looking for better prompt, base f_keep = 0.001, sim = 0.008
srv load: - found better prompt with f_keep = 0.977, sim = 1.000
srv update: - cache state: 1 prompts, 416.566 MiB (limits: 8192.000 MiB, 8192 tokens, 58249 est)
srv update: - prompt 0x63c2bba9e270: 2962 tokens, checkpoints: 0, 416.566 MiB
srv get_availabl: prompt cache update took 162.15 ms
slot launch_slot_: id 0 | task 422 | processing task
slot update_slots: id 0 | task 422 | new prompt, n_ctx_slot = 8192, n_keep = 0, task.n_tokens = 375
slot update_slots: id 0 | task 422 | need to evaluate at least 1 token for each active slot (n_past = 375, task.n_tokens() = 375)
slot update_slots: id 0 | task 422 | n_past was set to 374
slot update_slots: id 0 | task 422 | n_tokens = 374, memory_seq_rm [374, end)
slot update_slots: id 0 | task 422 | prompt processing progress, n_tokens = 375, batch.n_tokens = 1, progress = 1.000000
slot update_slots: id 0 | task 422 | prompt done, n_tokens = 375, batch.n_tokens = 1
slot print_timing: id 0 | task 422 |
prompt eval time = 30.72 ms / 1 tokens ( 30.72 ms per token, 32.55 tokens per second)
eval time = 196.03 ms / 10 tokens ( 19.60 ms per token, 51.01 tokens per second)
total time = 226.75 ms / 11 tokens
slot release: id 0 | task 422 | stop processing: n_tokens = 384, truncated = 0
srv update_slots: all slots are idle
srv log_server_r: request: POST /v1/chat/completions 127.0.0.1 200
srv params_from_: Chat format: Content-only
slot get_availabl: id 0 | task -1 | selected slot by LRU, t_last = 237485582368
srv get_availabl: updating prompt cache
srv prompt_save: - saving prompt with length 384, total state size = 54.005 MiB
srv load: - looking for better prompt, base f_keep = 0.008, sim = 0.001
srv load: - found better prompt with f_keep = 0.862, sim = 1.000
srv update: - cache state: 1 prompts, 54.005 MiB (limits: 8192.000 MiB, 8192 tokens, 58248 est)
srv update: - prompt 0x63c2bf15ea60: 384 tokens, checkpoints: 0, 54.005 MiB
srv get_availabl: prompt cache update took 76.62 ms
slot launch_slot_: id 0 | task 433 | processing task
slot update_slots: id 0 | task 433 | new prompt, n_ctx_slot = 8192, n_keep = 0, task.n_tokens = 2554
slot update_slots: id 0 | task 433 | need to evaluate at least 1 token for each active slot (n_past = 2554, task.n_tokens() = 2554)
slot update_slots: id 0 | task 433 | n_past was set to 2553
slot update_slots: id 0 | task 433 | n_tokens = 2553, memory_seq_rm [2553, end)
slot update_slots: id 0 | task 433 | prompt processing progress, n_tokens = 2554, batch.n_tokens = 1, progress = 1.000000
slot update_slots: id 0 | task 433 | prompt done, n_tokens = 2554, batch.n_tokens = 1
slot print_timing: id 0 | task 433 |
prompt eval time = 37.62 ms / 1 tokens ( 37.62 ms per token, 26.58 tokens per second)
eval time = 9680.67 ms / 409 tokens ( 23.67 ms per token, 42.25 tokens per second)
total time = 9718.29 ms / 410 tokens
slot release: id 0 | task 433 | stop processing: n_tokens = 2962, truncated = 0
srv update_slots: all slots are idle
srv log_server_r: request: POST /v1/chat/completions 127.0.0.1 200
srv operator(): operator(): cleaning up before exit...
llama_memory_breakdown_print: | memory breakdown [MiB] | total free self model context compute unaccounted |
llama_memory_breakdown_print: | - CUDA0 (RTX 4060 Ti) | 15944 = 5959 + (4203 = 2445 + 1152 + 606) + 5781 |
llama_memory_breakdown_print: | - Host | 325 = 304 + 0 + 21 |
Received second interrupt, terminating immediately.