Created August 9, 2025 23:43
| ubuntu@mi25:~/llama.cpp (master) $ set -x | |
| ./build/bin/llama-perplexity --n-gpu-layers 100 --split-mode layer -m ~/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-F16.gguf -f ~/polano.txt | |
| printf '\n\n' | |
| ./build/bin/llama-perplexity --n-gpu-layers 100 --split-mode layer -m ~/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-UD-Q8_K_XL.gguf -f ~/polano.txt | |
| set +x | |
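The two commands above differ only in the GGUF file, so the same comparison could be driven by a small loop. A minimal sketch, reusing exactly the flags shown above (the cached files are assumed to already exist, e.g. from llama.cpp's -hf download support):

    for gguf in unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-F16.gguf \
                unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-UD-Q8_K_XL.gguf; do
        ./build/bin/llama-perplexity --n-gpu-layers 100 --split-mode layer \
            -m ~/.cache/llama.cpp/"$gguf" -f ~/polano.txt
        printf '\n\n'
    done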
| ++ parse_git_branch | |
| ++ git branch --no-color | |
| ++ sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' | |
| + ./build/bin/llama-perplexity --n-gpu-layers 100 --split-mode layer -m /home/ubuntu/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-F16.gguf -f /home/ubuntu/polano.txt | |
| ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no | |
| ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no | |
| ggml_cuda_init: found 4 ROCm devices: | |
| Device 0: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 1: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 2: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 3: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| build: 6112 (99acbc99) with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu | |
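The binary is a ROCm/HIP build of llama.cpp running across four gfx900 (MI25) cards. The build configuration is not shown in the log; one plausible way to produce such a build (flags assumed, not confirmed by the output) is:

    cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx900 -DCMAKE_BUILD_TYPE=Release
    cmake --build build --config Release -j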
| llama_model_load_from_file_impl: using device ROCm0 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm1 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm2 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm3 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_loader: loaded meta data with 37 key-value pairs and 459 tensors from /home/ubuntu/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-F16.gguf (version GGUF V3 (latest)) | |
| llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. | |
| llama_model_loader: - kv 0: general.architecture str = gpt-oss | |
| llama_model_loader: - kv 1: general.type str = model | |
| llama_model_loader: - kv 2: general.name str = Gpt-Oss-20B | |
| llama_model_loader: - kv 3: general.basename str = Gpt-Oss-20B | |
| llama_model_loader: - kv 4: general.quantized_by str = Unsloth | |
| llama_model_loader: - kv 5: general.size_label str = 20B | |
| llama_model_loader: - kv 6: general.license str = apache-2.0 | |
| llama_model_loader: - kv 7: general.repo_url str = https://huggingface.co/unsloth | |
| llama_model_loader: - kv 8: general.tags arr[str,2] = ["vllm", "text-generation"] | |
| llama_model_loader: - kv 9: gpt-oss.block_count u32 = 24 | |
| llama_model_loader: - kv 10: gpt-oss.context_length u32 = 131072 | |
| llama_model_loader: - kv 11: gpt-oss.embedding_length u32 = 2880 | |
| llama_model_loader: - kv 12: gpt-oss.feed_forward_length u32 = 2880 | |
| llama_model_loader: - kv 13: gpt-oss.attention.head_count u32 = 64 | |
| llama_model_loader: - kv 14: gpt-oss.attention.head_count_kv u32 = 8 | |
| llama_model_loader: - kv 15: gpt-oss.rope.freq_base f32 = 150000.000000 | |
| llama_model_loader: - kv 16: gpt-oss.attention.layer_norm_rms_epsilon f32 = 0.000010 | |
| llama_model_loader: - kv 17: gpt-oss.expert_count u32 = 32 | |
| llama_model_loader: - kv 18: gpt-oss.expert_used_count u32 = 4 | |
| llama_model_loader: - kv 19: gpt-oss.attention.key_length u32 = 64 | |
| llama_model_loader: - kv 20: gpt-oss.attention.value_length u32 = 64 | |
| llama_model_loader: - kv 21: general.file_type u32 = 1 | |
| llama_model_loader: - kv 22: gpt-oss.attention.sliding_window u32 = 128 | |
| llama_model_loader: - kv 23: gpt-oss.expert_feed_forward_length u32 = 2880 | |
| llama_model_loader: - kv 24: gpt-oss.rope.scaling.type str = yarn | |
| llama_model_loader: - kv 25: gpt-oss.rope.scaling.factor f32 = 32.000000 | |
| llama_model_loader: - kv 26: gpt-oss.rope.scaling.original_context_length u32 = 4096 | |
| llama_model_loader: - kv 27: general.quantization_version u32 = 2 | |
| llama_model_loader: - kv 28: tokenizer.ggml.model str = gpt2 | |
| llama_model_loader: - kv 29: tokenizer.ggml.pre str = gpt-4o | |
| llama_model_loader: - kv 30: tokenizer.ggml.tokens arr[str,201088] = ["!", "\"", "#", "$", "%", "&", "'", ... | |
| llama_model_loader: - kv 31: tokenizer.ggml.token_type arr[i32,201088] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... | |
| llama_model_loader: - kv 32: tokenizer.ggml.merges arr[str,446189] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "... | |
| llama_model_loader: - kv 33: tokenizer.ggml.bos_token_id u32 = 199998 | |
| llama_model_loader: - kv 34: tokenizer.ggml.eos_token_id u32 = 200002 | |
| llama_model_loader: - kv 35: tokenizer.ggml.padding_token_id u32 = 200017 | |
| llama_model_loader: - kv 36: tokenizer.chat_template str = {# Copyright 2025-present Unsloth. Ap... | |
| llama_model_loader: - type f32: 289 tensors | |
| llama_model_loader: - type f16: 98 tensors | |
| llama_model_loader: - type mxfp4: 72 tensors | |
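The 72 mxfp4 tensors are presumably the MoE expert weights, which gpt-oss ships in MXFP4; that would explain why even the "F16" file stays around 13 GiB. The same metadata can be inspected without loading the model, assuming the gguf Python package from llama.cpp's gguf-py is installed (an assumption; it is not used in this session):

    pip install gguf
    gguf-dump ~/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-F16.gguf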
| print_info: file format = GGUF V3 (latest) | |
| print_info: file type = F16 | |
| print_info: file size = 12.83 GiB (5.27 BPW) | |
| load: printing all EOG tokens: | |
| load: - 199999 ('<|endoftext|>') | |
| load: - 200002 ('<|return|>') | |
| load: - 200007 ('<|end|>') | |
| load: - 200012 ('<|call|>') | |
| load: special_eog_ids contains both '<|return|>' and '<|call|>' tokens, removing '<|end|>' token from EOG list | |
| load: special tokens cache size = 21 | |
| load: token to piece cache size = 1.3332 MB | |
| print_info: arch = gpt-oss | |
| print_info: vocab_only = 0 | |
| print_info: n_ctx_train = 131072 | |
| print_info: n_embd = 2880 | |
| print_info: n_layer = 24 | |
| print_info: n_head = 64 | |
| print_info: n_head_kv = 8 | |
| print_info: n_rot = 64 | |
| print_info: n_swa = 128 | |
| print_info: is_swa_any = 1 | |
| print_info: n_embd_head_k = 64 | |
| print_info: n_embd_head_v = 64 | |
| print_info: n_gqa = 8 | |
| print_info: n_embd_k_gqa = 512 | |
| print_info: n_embd_v_gqa = 512 | |
| print_info: f_norm_eps = 0.0e+00 | |
| print_info: f_norm_rms_eps = 1.0e-05 | |
| print_info: f_clamp_kqv = 0.0e+00 | |
| print_info: f_max_alibi_bias = 0.0e+00 | |
| print_info: f_logit_scale = 0.0e+00 | |
| print_info: f_attn_scale = 0.0e+00 | |
| print_info: n_ff = 2880 | |
| print_info: n_expert = 32 | |
| print_info: n_expert_used = 4 | |
| print_info: causal attn = 1 | |
| print_info: pooling type = 0 | |
| print_info: rope type = 2 | |
| print_info: rope scaling = yarn | |
| print_info: freq_base_train = 150000.0 | |
| print_info: freq_scale_train = 0.03125 | |
| print_info: n_ctx_orig_yarn = 4096 | |
| print_info: rope_finetuned = unknown | |
| print_info: model type = ?B | |
| print_info: model params = 20.91 B | |
| print_info: general.name = Gpt-Oss-20B | |
| print_info: n_ff_exp = 2880 | |
| print_info: vocab type = BPE | |
| print_info: n_vocab = 201088 | |
| print_info: n_merges = 446189 | |
| print_info: BOS token = 199998 '<|startoftext|>' | |
| print_info: EOS token = 200002 '<|return|>' | |
| print_info: EOT token = 199999 '<|endoftext|>' | |
| print_info: PAD token = 200017 '<|reserved_200017|>' | |
| print_info: LF token = 198 'Ċ' | |
| print_info: EOG token = 199999 '<|endoftext|>' | |
| print_info: EOG token = 200002 '<|return|>' | |
| print_info: EOG token = 200012 '<|call|>' | |
| print_info: max token length = 256 | |
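Several of the numbers in this block can be cross-checked against one another. A small sketch of the arithmetic, using only values printed above:

    awk 'BEGIN {
        # bits per weight: 12.83 GiB over 20.91e9 parameters
        printf "BPW        = %.2f\n", 12.83 * 2^30 * 8 / 20.91e9;
        # YaRN: 4096-token original context scaled by factor 32
        printf "n_ctx      = %d\n",   4096 * 32;
        printf "freq_scale = %.5f\n", 1 / 32;
        # GQA: 8 KV heads x 64-dim heads
        printf "k_gqa dim  = %d\n",   8 * 64;
    }'

This reproduces the 5.27 BPW, the 131072-token training context, the 0.03125 freq_scale and the 512 n_embd_k_gqa values printed above.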
| load_tensors: loading model tensors, this can take a while... (mmap = true) | |
| load_tensors: offloading 24 repeating layers to GPU | |
| load_tensors: offloading output layer to GPU | |
| load_tensors: offloaded 25/25 layers to GPU | |
| load_tensors: ROCm0 model buffer size = 3188.52 MiB | |
| load_tensors: ROCm1 model buffer size = 2733.01 MiB | |
| load_tensors: ROCm2 model buffer size = 2733.01 MiB | |
| load_tensors: ROCm3 model buffer size = 3382.13 MiB | |
| load_tensors: CPU_Mapped model buffer size = 1104.61 MiB | |
| ................................................................................... | |
| llama_context: constructing llama_context | |
| llama_context: n_seq_max = 4 | |
| llama_context: n_ctx = 2048 | |
| llama_context: n_ctx_per_seq = 512 | |
| llama_context: n_batch = 2048 | |
| llama_context: n_ubatch = 512 | |
| llama_context: causal_attn = 1 | |
| llama_context: flash_attn = 0 | |
| llama_context: kv_unified = false | |
| llama_context: freq_base = 150000.0 | |
| llama_context: freq_scale = 0.03125 | |
| llama_context: n_ctx_per_seq (512) < n_ctx_train (131072) -- the full capacity of the model will not be utilized | |
| llama_context: requested n_seq_max (4) > 1, but swa_full is not enabled -- performance may be degraded: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573 | |
| llama_context: ROCm_Host output buffer size = 3.07 MiB | |
| llama_kv_cache_unified_iswa: creating non-SWA KV cache, size = 512 cells | |
| llama_kv_cache_unified: ROCm0 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm1 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm2 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm3 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: size = 48.00 MiB ( 512 cells, 12 layers, 4/4 seqs), K (f16): 24.00 MiB, V (f16): 24.00 MiB | |
| llama_kv_cache_unified_iswa: creating SWA KV cache, size = 512 cells | |
| llama_kv_cache_unified: ROCm0 KV buffer size = 16.00 MiB | |
| llama_kv_cache_unified: ROCm1 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm2 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm3 KV buffer size = 8.00 MiB | |
| llama_kv_cache_unified: size = 48.00 MiB ( 512 cells, 12 layers, 4/4 seqs), K (f16): 24.00 MiB, V (f16): 24.00 MiB | |
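The 24 MiB K and V figures follow directly from the cache geometry: each of the two caches (non-SWA and SWA) covers 12 of the 24 layers, with 512 cells, a 512-dim K/V per layer, 4 sequences and 2 bytes per f16 element. A quick check:

    awk 'BEGIN {
        cells = 512; dim = 512; layers = 12; seqs = 4; bytes = 2;   # f16
        printf "K (or V) per cache = %.2f MiB\n", cells * dim * layers * seqs * bytes / 2^20;
    }'

which gives 24.00 MiB, i.e. 48 MiB per cache and 96 MiB of KV in total, as reported.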
| llama_context: pipeline parallelism enabled (n_copies=4) | |
| llama_context: ROCm0 compute buffer size = 137.79 MiB | |
| llama_context: ROCm1 compute buffer size = 137.79 MiB | |
| llama_context: ROCm2 compute buffer size = 137.79 MiB | |
| llama_context: ROCm3 compute buffer size = 444.92 MiB | |
| llama_context: ROCm_Host compute buffer size = 29.68 MiB | |
| llama_context: graph nodes = 1446 | |
| llama_context: graph splits = 5 | |
| common_init_from_params: added <|endoftext|> logit bias = -inf | |
| common_init_from_params: added <|return|> logit bias = -inf | |
| common_init_from_params: added <|call|> logit bias = -inf | |
| common_init_from_params: setting dry_penalty_last_n to ctx_size = 2048 | |
| common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable) | |
| system_info: n_threads = 12 (n_threads_batch = 12) / 24 | ROCm : NO_VMM = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | LLAMAFILE = 1 | OPENMP = 1 | REPACK = 1 | | |
| perplexity: tokenizing the input .. | |
| perplexity: tokenization took 113.776 ms | |
| perplexity: calculating perplexity over 70 chunks, n_ctx=512, batch_size=2048, n_seq=4 | |
| perplexity: 7.47 seconds per pass - ETA 2.17 minutes | |
| [1]112.9391,[2]120.3856,[3]99.8193,[4]92.5444,[5]88.6741,[6]89.6368,[7]91.0741,[8]92.2524,[9]89.6240,[10]90.4831,[11]89.4148,[12]89.9362,[13]88.6410,[14]89.3298,[15]89.1087,[16]89.3652,[17]91.5408,[18]90.9635,[19]91.4515,[20]89.9630,[21]89.1542,[22]89.6232,[23]91.0604,[24]90.9767,[25]90.7946,[26]89.9260,[27]89.9649,[28]91.2270,[29]91.6070,[30]91.0361,[31]91.3673,[32]91.8827,[33]92.4371,[34]92.4308,[35]91.4885,[36]91.1588,[37]90.6893,[38]90.8425,[39]90.3654,[40]90.2084,[41]89.7867,[42]90.2956,[43]90.6560,[44]89.9171,[45]89.7907,[46]90.0890,[47]90.1316,[48]90.6154,[49]90.3287,[50]90.4228,[51]90.8813,[52]90.1366,[53]90.2139,[54]90.3047,[55]90.1718,[56]89.5759,[57]90.1230,[58]90.6507,[59]90.6380,[60]90.6367,[61]90.8753,[62]90.7443,[63]90.7123,[64]90.5564,[65]90.3993,[66]90.4221,[67]90.0149,[68]90.3950,[69]90.6447,[70]90.9716, | |
| Final estimate: PPL = 90.9716 +/- 1.62124 | |
| llama_perf_context_print: load time = 6037.53 ms | |
| llama_perf_context_print: prompt eval time = 104525.96 ms / 35840 tokens ( 2.92 ms per token, 342.88 tokens per second) | |
| llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) | |
| llama_perf_context_print: total time = 109684.47 ms / 35841 tokens | |
| llama_perf_context_print: graphs reused = 0 | |
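For reference, the perplexity tool scores the text in 70 chunks of 512 tokens (70 x 512 = 35,840 tokens, matching the prompt-eval count), and the final PPL is, roughly speaking, exp of the mean per-token negative log-likelihood over the scored tokens, so lower is better. The throughput line can also be re-derived:

    awk 'BEGIN {
        printf "tokens evaluated = %d\n",   70 * 512;
        printf "tok/s (F16 run)  = %.2f\n", 35840 / (104525.96 / 1000);
    }'

which matches the 342.88 tokens per second reported above.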
| ++ parse_git_branch | |
| ++ git branch --no-color | |
| ++ sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' | |
| + printf '\n\n' | |
| ++ parse_git_branch | |
| ++ git branch --no-color | |
| ++ sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' | |
| + ./build/bin/llama-perplexity --n-gpu-layers 100 --split-mode layer -m /home/ubuntu/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-UD-Q8_K_XL.gguf -f /home/ubuntu/polano.txt | |
| ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no | |
| ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no | |
| ggml_cuda_init: found 4 ROCm devices: | |
| Device 0: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 1: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 2: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| Device 3: Radeon Instinct MI25, gfx900:xnack- (0x900), VMM: no, Wave Size: 64 | |
| build: 6112 (99acbc99) with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu | |
| llama_model_load_from_file_impl: using device ROCm0 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm1 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm2 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_load_from_file_impl: using device ROCm3 (Radeon Instinct MI25) - 16352 MiB free | |
| llama_model_loader: loaded meta data with 37 key-value pairs and 459 tensors from /home/ubuntu/.cache/llama.cpp/unsloth_gpt-oss-20b-GGUF_gpt-oss-20b-UD-Q8_K_XL.gguf (version GGUF V3 (latest)) | |
| llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. | |
| llama_model_loader: - kv 0: general.architecture str = gpt-oss | |
| llama_model_loader: - kv 1: general.type str = model | |
| llama_model_loader: - kv 2: general.name str = Gpt-Oss-20B | |
| llama_model_loader: - kv 3: general.basename str = Gpt-Oss-20B | |
| llama_model_loader: - kv 4: general.quantized_by str = Unsloth | |
| llama_model_loader: - kv 5: general.size_label str = 20B | |
| llama_model_loader: - kv 6: general.license str = apache-2.0 | |
| llama_model_loader: - kv 7: general.repo_url str = https://huggingface.co/unsloth | |
| llama_model_loader: - kv 8: general.tags arr[str,2] = ["vllm", "text-generation"] | |
| llama_model_loader: - kv 9: gpt-oss.block_count u32 = 24 | |
| llama_model_loader: - kv 10: gpt-oss.context_length u32 = 131072 | |
| llama_model_loader: - kv 11: gpt-oss.embedding_length u32 = 2880 | |
| llama_model_loader: - kv 12: gpt-oss.feed_forward_length u32 = 2880 | |
| llama_model_loader: - kv 13: gpt-oss.attention.head_count u32 = 64 | |
| llama_model_loader: - kv 14: gpt-oss.attention.head_count_kv u32 = 8 | |
| llama_model_loader: - kv 15: gpt-oss.rope.freq_base f32 = 150000.000000 | |
| llama_model_loader: - kv 16: gpt-oss.attention.layer_norm_rms_epsilon f32 = 0.000010 | |
| llama_model_loader: - kv 17: gpt-oss.expert_count u32 = 32 | |
| llama_model_loader: - kv 18: gpt-oss.expert_used_count u32 = 4 | |
| llama_model_loader: - kv 19: gpt-oss.attention.key_length u32 = 64 | |
| llama_model_loader: - kv 20: gpt-oss.attention.value_length u32 = 64 | |
| llama_model_loader: - kv 21: gpt-oss.attention.sliding_window u32 = 128 | |
| llama_model_loader: - kv 22: gpt-oss.expert_feed_forward_length u32 = 2880 | |
| llama_model_loader: - kv 23: gpt-oss.rope.scaling.type str = yarn | |
| llama_model_loader: - kv 24: gpt-oss.rope.scaling.factor f32 = 32.000000 | |
| llama_model_loader: - kv 25: gpt-oss.rope.scaling.original_context_length u32 = 4096 | |
| llama_model_loader: - kv 26: tokenizer.ggml.model str = gpt2 | |
| llama_model_loader: - kv 27: tokenizer.ggml.pre str = gpt-4o | |
| llama_model_loader: - kv 28: tokenizer.ggml.tokens arr[str,201088] = ["!", "\"", "#", "$", "%", "&", "'", ... | |
| llama_model_loader: - kv 29: tokenizer.ggml.token_type arr[i32,201088] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ... | |
| llama_model_loader: - kv 30: tokenizer.ggml.merges arr[str,446189] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "... | |
| llama_model_loader: - kv 31: tokenizer.ggml.bos_token_id u32 = 199998 | |
| llama_model_loader: - kv 32: tokenizer.ggml.eos_token_id u32 = 200002 | |
| llama_model_loader: - kv 33: tokenizer.ggml.padding_token_id u32 = 200017 | |
| llama_model_loader: - kv 34: tokenizer.chat_template str = {# Chat template fixes by Unsloth #}\n... | |
| llama_model_loader: - kv 35: general.quantization_version u32 = 2 | |
| llama_model_loader: - kv 36: general.file_type u32 = 7 | |
| llama_model_loader: - type f32: 289 tensors | |
| llama_model_loader: - type f16: 2 tensors | |
| llama_model_loader: - type q8_0: 96 tensors | |
| llama_model_loader: - type mxfp4: 72 tensors | |
| print_info: file format = GGUF V3 (latest) | |
| print_info: file type = Q8_0 | |
| print_info: file size = 12.28 GiB (5.04 BPW) | |
| load: printing all EOG tokens: | |
| load: - 199999 ('<|endoftext|>') | |
| load: - 200002 ('<|return|>') | |
| load: - 200007 ('<|end|>') | |
| load: - 200012 ('<|call|>') | |
| load: special_eog_ids contains both '<|return|>' and '<|call|>' tokens, removing '<|end|>' token from EOG list | |
| load: special tokens cache size = 21 | |
| load: token to piece cache size = 1.3332 MB | |
| print_info: arch = gpt-oss | |
| print_info: vocab_only = 0 | |
| print_info: n_ctx_train = 131072 | |
| print_info: n_embd = 2880 | |
| print_info: n_layer = 24 | |
| print_info: n_head = 64 | |
| print_info: n_head_kv = 8 | |
| print_info: n_rot = 64 | |
| print_info: n_swa = 128 | |
| print_info: is_swa_any = 1 | |
| print_info: n_embd_head_k = 64 | |
| print_info: n_embd_head_v = 64 | |
| print_info: n_gqa = 8 | |
| print_info: n_embd_k_gqa = 512 | |
| print_info: n_embd_v_gqa = 512 | |
| print_info: f_norm_eps = 0.0e+00 | |
| print_info: f_norm_rms_eps = 1.0e-05 | |
| print_info: f_clamp_kqv = 0.0e+00 | |
| print_info: f_max_alibi_bias = 0.0e+00 | |
| print_info: f_logit_scale = 0.0e+00 | |
| print_info: f_attn_scale = 0.0e+00 | |
| print_info: n_ff = 2880 | |
| print_info: n_expert = 32 | |
| print_info: n_expert_used = 4 | |
| print_info: causal attn = 1 | |
| print_info: pooling type = 0 | |
| print_info: rope type = 2 | |
| print_info: rope scaling = yarn | |
| print_info: freq_base_train = 150000.0 | |
| print_info: freq_scale_train = 0.03125 | |
| print_info: n_ctx_orig_yarn = 4096 | |
| print_info: rope_finetuned = unknown | |
| print_info: model type = ?B | |
| print_info: model params = 20.91 B | |
| print_info: general.name = Gpt-Oss-20B | |
| print_info: n_ff_exp = 2880 | |
| print_info: vocab type = BPE | |
| print_info: n_vocab = 201088 | |
| print_info: n_merges = 446189 | |
| print_info: BOS token = 199998 '<|startoftext|>' | |
| print_info: EOS token = 200002 '<|return|>' | |
| print_info: EOT token = 199999 '<|endoftext|>' | |
| print_info: PAD token = 200017 '<|reserved_200017|>' | |
| print_info: LF token = 198 'Ċ' | |
| print_info: EOG token = 199999 '<|endoftext|>' | |
| print_info: EOG token = 200002 '<|return|>' | |
| print_info: EOG token = 200012 '<|call|>' | |
| print_info: max token length = 256 | |
| load_tensors: loading model tensors, this can take a while... (mmap = true) | |
| load_tensors: offloading 24 repeating layers to GPU | |
| load_tensors: offloading output layer to GPU | |
| load_tensors: offloaded 25/25 layers to GPU | |
| load_tensors: ROCm0 model buffer size = 3022.41 MiB | |
| load_tensors: ROCm1 model buffer size = 2590.64 MiB | |
| load_tensors: ROCm2 model buffer size = 2590.64 MiB | |
| load_tensors: ROCm3 model buffer size = 3263.48 MiB | |
| load_tensors: CPU_Mapped model buffer size = 1104.61 MiB | |
| ............................................................................... | |
| llama_context: constructing llama_context | |
| llama_context: n_seq_max = 4 | |
| llama_context: n_ctx = 2048 | |
| llama_context: n_ctx_per_seq = 512 | |
| llama_context: n_batch = 2048 | |
| llama_context: n_ubatch = 512 | |
| llama_context: causal_attn = 1 | |
| llama_context: flash_attn = 0 | |
| llama_context: kv_unified = false | |
| llama_context: freq_base = 150000.0 | |
| llama_context: freq_scale = 0.03125 | |
| llama_context: n_ctx_per_seq (512) < n_ctx_train (131072) -- the full capacity of the model will not be utilized | |
| llama_context: requested n_seq_max (4) > 1, but swa_full is not enabled -- performance may be degraded: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573 | |
| llama_context: ROCm_Host output buffer size = 3.07 MiB | |
| llama_kv_cache_unified_iswa: creating non-SWA KV cache, size = 512 cells | |
| llama_kv_cache_unified: ROCm0 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm1 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm2 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm3 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: size = 48.00 MiB ( 512 cells, 12 layers, 4/4 seqs), K (f16): 24.00 MiB, V (f16): 24.00 MiB | |
| llama_kv_cache_unified_iswa: creating SWA KV cache, size = 512 cells | |
| llama_kv_cache_unified: ROCm0 KV buffer size = 16.00 MiB | |
| llama_kv_cache_unified: ROCm1 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm2 KV buffer size = 12.00 MiB | |
| llama_kv_cache_unified: ROCm3 KV buffer size = 8.00 MiB | |
| llama_kv_cache_unified: size = 48.00 MiB ( 512 cells, 12 layers, 4/4 seqs), K (f16): 24.00 MiB, V (f16): 24.00 MiB | |
| llama_context: pipeline parallelism enabled (n_copies=4) | |
| llama_context: ROCm0 compute buffer size = 137.79 MiB | |
| llama_context: ROCm1 compute buffer size = 137.79 MiB | |
| llama_context: ROCm2 compute buffer size = 137.79 MiB | |
| llama_context: ROCm3 compute buffer size = 444.92 MiB | |
| llama_context: ROCm_Host compute buffer size = 29.68 MiB | |
| llama_context: graph nodes = 1446 | |
| llama_context: graph splits = 5 | |
| common_init_from_params: added <|endoftext|> logit bias = -inf | |
| common_init_from_params: added <|return|> logit bias = -inf | |
| common_init_from_params: added <|call|> logit bias = -inf | |
| common_init_from_params: setting dry_penalty_last_n to ctx_size = 2048 | |
| common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable) | |
| system_info: n_threads = 12 (n_threads_batch = 12) / 24 | ROCm : NO_VMM = 1 | PEER_MAX_BATCH_SIZE = 128 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | LLAMAFILE = 1 | OPENMP = 1 | REPACK = 1 | | |
| perplexity: tokenizing the input .. | |
| perplexity: tokenization took 113.672 ms | |
| perplexity: calculating perplexity over 70 chunks, n_ctx=512, batch_size=2048, n_seq=4 | |
| perplexity: 8.07 seconds per pass - ETA 2.35 minutes | |
| [1]115.0262,[2]122.7098,[3]100.3466,[4]93.4961,[5]89.6047,[6]90.0965,[7]91.8012,[8]92.3142,[9]89.6770,[10]90.7297,[11]89.6987,[12]90.3249,[13]89.0910,[14]89.6744,[15]89.5042,[16]90.0630,[17]92.1337,[18]91.7380,[19]92.3333,[20]90.6335,[21]89.8392,[22]90.2258,[23]91.5899,[24]91.4898,[25]91.1945,[26]90.3981,[27]90.5297,[28]91.5912,[29]91.9983,[30]91.4049,[31]91.8051,[32]92.0515,[33]92.5888,[34]92.5595,[35]91.5543,[36]91.2818,[37]90.8417,[38]90.9532,[39]90.4572,[40]90.3260,[41]89.9503,[42]90.4023,[43]90.8267,[44]90.1623,[45]90.0122,[46]90.2988,[47]90.3504,[48]90.8263,[49]90.6357,[50]90.6904,[51]91.1480,[52]90.4410,[53]90.5445,[54]90.6409,[55]90.5900,[56]90.0046,[57]90.4890,[58]90.9705,[59]90.9032,[60]90.8509,[61]91.1024,[62]90.9639,[63]90.9586,[64]90.7712,[65]90.6252,[66]90.6669,[67]90.2588,[68]90.6704,[69]90.8999,[70]91.1773, | |
| Final estimate: PPL = 91.1773 +/- 1.62520 | |
| llama_perf_context_print: load time = 5565.62 ms | |
| llama_perf_context_print: prompt eval time = 117138.47 ms / 35840 tokens ( 3.27 ms per token, 305.96 tokens per second) | |
| llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second) | |
| llama_perf_context_print: total time = 122369.80 ms / 35841 tokens | |
| llama_perf_context_print: graphs reused = 0 | |
| ++ parse_git_branch | |
| ++ git branch --no-color | |
| ++ sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/' | |
| + set +x |
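Putting the two runs side by side, the final estimates above are very close. A quick comparison of the reported numbers:

    awk 'BEGIN {
        f16 = 90.9716; q8 = 91.1773;
        printf "PPL delta        = %+.4f (%+.2f%%)\n", q8 - f16, (q8 - f16) / f16 * 100;
        printf "tok/s F16        = %.2f\n", 35840 / 104.52596;
        printf "tok/s UD-Q8_K_XL = %.2f\n", 35840 / 117.13847;
    }'

On this text the UD-Q8_K_XL file comes out about 0.23% higher in perplexity and roughly 11% slower in prompt throughput on these MI25s than the F16 file; with +/-1.6 error bars on both estimates, a quality gap this small is hard to distinguish from noise.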