
Commit eb9951d

Merge branch 'ggml-org:master' into master
2 parents: e45b997 + 80f19b4


70 files changed (+6648, -5386 lines)

.github/workflows/build.yml

Lines changed: 3 additions & 2 deletions
@@ -1766,16 +1766,17 @@ jobs:
     if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }}
     defaults:
       run:
-        shell: bash -el {0}
-    runs-on: ubuntu-24.04-arm
+        shell: bash -el {0}
     strategy:
       matrix:
+        arch: [x86, aarch64]
         cann:
           - '8.1.RC1.alpha001-910b-openeuler22.03-py3.10'
         device:
           - 'ascend910b3'
         build:
           - 'Release'
+    runs-on: ${{ matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
     container: ascendai/cann:${{ matrix.cann }}
     steps:
       - name: Checkout
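The interesting part of this hunk is the new `runs-on` expression: GitHub Actions has no ternary operator, so `matrix.arch == 'aarch64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04'` relies on `&&`/`||` short-circuiting, which only behaves like a ternary because the runner label is a non-empty (truthy) string. A minimal Python sketch of the same selection logic, for illustration only (only the runner labels and arch values come from the diff):

# Illustration of the workflow's runner-selection expression; not part of the commit.
def pick_runner(arch: str) -> str:
    # aarch64 matrix entries run on the ARM runner, everything else on plain ubuntu-24.04
    return "ubuntu-24.04-arm" if arch == "aarch64" else "ubuntu-24.04"

assert pick_runner("aarch64") == "ubuntu-24.04-arm"
assert pick_runner("x86") == "ubuntu-24.04"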

Makefile

Lines changed: 0 additions & 4 deletions
@@ -780,10 +780,6 @@ ifdef GGML_HIP
 
 MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA
 
-ifdef GGML_HIP_UMA
-MK_CPPFLAGS += -DGGML_HIP_UMA
-endif # GGML_HIP_UMA
-
 MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
 MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64
 MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas

convert_hf_to_gguf.py

Lines changed: 31 additions & 2 deletions
@@ -4428,6 +4428,10 @@ def set_vocab(self):
         self._set_vocab_gpt2()
 
     def set_gguf_parameters(self):
+
+        # note: deepseek2 using MLA converts into MQA (ie: GQA with 1 group)
+        self.hparams["num_key_value_heads"] = 1
+
         super().set_gguf_parameters()
         hparams = self.hparams
 
@@ -4436,8 +4440,13 @@ def set_gguf_parameters(self):
         if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None:
             self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"])
         self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
-        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
-        self.gguf_writer.add_value_length(hparams["v_head_dim"])
+
+        # note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
+        self.gguf_writer.add_key_length(hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"])
+        self.gguf_writer.add_value_length(hparams["kv_lora_rank"])
+        self.gguf_writer.add_key_length_mla(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
+        self.gguf_writer.add_value_length_mla(hparams["v_head_dim"])
+
         self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"])
         self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
         self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
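To make the head-size bookkeeping above concrete, here is a small sketch of the values these writer calls end up recording, using assumed DeepSeek-V2-style hyperparameters (kv_lora_rank=512, qk_rope_head_dim=64, qk_nope_head_dim=128, v_head_dim=128; the real numbers come from the model's config.json):

# Assumed DeepSeek-V2-style hyperparameters, for illustration only.
hparams = {
    "kv_lora_rank": 512,
    "qk_rope_head_dim": 64,
    "qk_nope_head_dim": 128,
    "v_head_dim": 128,
}

# Compressed MQA-style cache sizes (what add_key_length/add_value_length store):
key_length = hparams["kv_lora_rank"] + hparams["qk_rope_head_dim"]    # 512 + 64 = 576
value_length = hparams["kv_lora_rank"]                                # 512

# Per-head sizes after decompressing back to MHA (the *_mla variants):
key_length_mla = hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"]  # 128 + 64 = 192
value_length_mla = hparams["v_head_dim"]                                    # 128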
@@ -4506,6 +4515,26 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         else:
             return []
 
+        # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed
+        if name.endswith("kv_b_proj.weight"):
+            name_kb = name.replace("kv_b_proj", "k_b_proj")
+            name_vb = name.replace("kv_b_proj", "v_b_proj")
+
+            n_head_kv = self.hparams["num_key_value_heads"]
+            v_head_dim = self.hparams["v_head_dim"]
+            qk_nope_head_dim = self.hparams["qk_nope_head_dim"]
+
+            assert data_torch.shape[0] == n_head_kv * (v_head_dim + qk_nope_head_dim)
+
+            kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
+            k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
+            k_b = k_b.transpose(1, 2)
+
+            return [
+                (self.map_tensor_name(name_kb), k_b),
+                (self.map_tensor_name(name_vb), v_b)
+            ]
+
         return [(self.map_tensor_name(name), data_torch)]
 
     def prepare_tensors(self):
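The `kv_b_proj` split above can be exercised in isolation. Below is a self-contained sketch with a random tensor standing in for the real weight; the dimensions are the same assumed DeepSeek-V2-style values as above, and `n_head_kv` is 1 because `set_gguf_parameters` forces the MQA layout:

import torch

# Assumed illustrative dimensions; a real conversion reads these from the checkpoint.
n_head_kv = 1            # deepseek2 MLA is converted to MQA, i.e. a single KV group
qk_nope_head_dim = 128
v_head_dim = 128
kv_lora_rank = 512

# Fused kv_b_proj.weight: (n_head_kv * (qk_nope_head_dim + v_head_dim), kv_lora_rank)
data_torch = torch.randn(n_head_kv * (qk_nope_head_dim + v_head_dim), kv_lora_rank)

# Same steps as modify_tensors(): view per head, split into K and V parts, transpose K.
kv_b = data_torch.view(n_head_kv, v_head_dim + qk_nope_head_dim, data_torch.shape[-1])
k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1)
k_b = k_b.transpose(1, 2)

print(k_b.shape)  # torch.Size([1, 512, 128]) -> stored as k_b_proj (pre-transposed for absorption)
print(v_b.shape)  # torch.Size([1, 128, 512]) -> stored as v_b_proj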

docs/build.md

Lines changed: 4 additions & 2 deletions
@@ -259,8 +259,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm
     cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
     && cmake --build build --config Release -- -j 16
 ```
-On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`.
-However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
 
 To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system.
 
@@ -296,6 +294,10 @@ You can download it from your Linux distro's package manager or from here: [ROCm
 The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
 If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.
 
+### Unified Memory
+
+On Linux it is possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
+
 ## Vulkan
 
 **Windows**
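With the `-DGGML_HIP_UMA` build flag removed, unified memory becomes a runtime decision. A sketch of enabling it for a child process from Python; the binary path, model name, and prompt are placeholders, not taken from this commit:

import os
import subprocess

# Turn on unified memory at runtime instead of at build time.
env = os.environ.copy()
env["GGML_CUDA_ENABLE_UNIFIED_MEMORY"] = "1"

# Placeholder command line: substitute your own llama.cpp binary, model and arguments.
subprocess.run(["./build/bin/llama-cli", "-m", "model.gguf", "-p", "Hello"], env=env)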

ggml/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -170,7 +170,6 @@ option(GGML_HIP "ggml: use HIP"
 option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF)
 option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON)
 option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF)
-option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF)
 option(GGML_VULKAN "ggml: use Vulkan" OFF)
 option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF)
 option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF)
