Commit c74453d

📝 add doc support and fix bug in qwen2

1 parent 8bad019 commit c74453d

File tree

11 files changed, +170 -12 lines changed

.github/workflows/book-ci.yml

+32
@@ -0,0 +1,32 @@
+name: Book-CI
+
+on:
+  push:
+    branches:
+      - main
+      - server_support
+
+  pull_request:
+    branches:
+      - main
+      - server_support
+jobs:
+  test:
+    name: test
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Rust
+        run: |
+          rustup set profile minimal
+          rustup toolchain install stable
+          rustup default stable
+      - name: Setup mdBook
+        uses: peaceiris/actions-mdbook@v2
+        with:
+          mdbook-version: "latest"
+      # - name: Run tests
+      #   run: mdbook test

.github/workflows/deploy.yml

+48
@@ -0,0 +1,48 @@
+name: Deploy
+
+on:
+  push:
+    branches:
+      - main
+      - server_support
+
+  pull_request:
+    branches:
+      - main
+      - server_support
+
+defaults:
+  run:
+    shell: bash
+
+permissions:
+  contents: write
+
+jobs:
+  deploy:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install Rust
+        run: |
+          rustup set profile minimal
+          rustup toolchain install stable
+          rustup default stable
+      - name: Setup mdBook
+        uses: peaceiris/actions-mdbook@v2
+        with:
+          mdbook-version: "latest"
+      - run: mdbook build
+      # - name: Copy Assets
+      #   run: |
+      #     chmod +x ci/copy-assets.sh
+      #     ci/copy-assets.sh ${{ matrix.os }}
+      - name: Deploy
+        uses: peaceiris/actions-gh-pages@v3
+        if: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/heads/server_support' }}
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./book

.gitignore

+1
@@ -22,3 +22,4 @@ img/
 tmp1.txt
 test_65_300_1536.txt
 test.txt
+book

book.toml

+18
@@ -0,0 +1,18 @@
+[book]
+authors = ["kvcache-ai"]
+language = "zh-CN"
+title = "Ktransformers"
+src = "doc"
+
+[output.html]
+git-repository-url = "https://github.com/kvcache-ai/ktransformers"
+edit-url-template = "https://github.com/kvcache-ai/ktransformers/edit/main/{path}"
+
+[output.html.playground]
+editable = true
+copy-js = true
+# line-numbers = true
+
+[output.html.fold]
+enable = true
+level = 0

doc/README.md

+31
@@ -0,0 +1,31 @@
+<div align="center">
+  <!-- <h1>KTransformers</h1> -->
+  <p align="center">
+
+  <picture>
+    <img alt="KTransformers" src="https://github.com/user-attachments/assets/d5a2492f-a415-4456-af99-4ab102f13f8b" width=50%>
+
+  </picture>
+
+  </p>
+
+</div>
+
+<h2 id="intro">🎉 Introduction</h2>
+KTransformers, pronounced as Quick Transformers, is designed to enhance your 🤗 <a href="https://github.com/huggingface/transformers">Transformers</a> experience with advanced kernel optimizations and placement/parallelism strategies.
+<br/><br/>
+KTransformers is a flexible, Python-centric framework designed with extensibility at its core.
+By implementing and injecting an optimized module with a single line of code, users gain access to a Transformers-compatible
+interface, RESTful APIs compliant with OpenAI and Ollama, and even a simplified ChatGPT-like web UI.
+<br/><br/>
+Our vision for KTransformers is to serve as a flexible platform for experimenting with innovative LLM inference optimizations. Please let us know if you need any other features.
+
+<h2 id="Updates">🔥 Updates</h2>
+
+* **Feb 10, 2025**: Support DeepSeek-R1 and V3 on single-GPU (24GB VRAM)/multi-GPU setups with 382GB of DRAM, up to 3~28x speedup. The detailed tutorial is [here](./doc/en/DeepseekR1_V3_tutorial.md).
+* **Aug 28, 2024**: Support 1M context under the InternLM2.5-7B-Chat-1M model, utilizing 24GB of VRAM and 150GB of DRAM. The detailed tutorial is [here](./doc/en/long_context_tutorial.md).
+* **Aug 28, 2024**: Decrease DeepseekV2's required VRAM from 21GB to 11GB.
+* **Aug 15, 2024**: Update detailed [TUTORIAL](doc/en/injection_tutorial.md) for injection and multi-GPU.
+* **Aug 14, 2024**: Support llamafile as linear backend.
+* **Aug 12, 2024**: Support multiple GPUs; support new models: Mixtral 8\*7B and 8\*22B; support q2k, q3k, q5k dequantization on GPU.
+* **Aug 9, 2024**: Support Windows natively.
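
The "single line of code" injection mentioned in the README refers to KTransformers' rule-driven module replacement. A minimal sketch of that flow, assuming the usual `optimize_and_load_gguf` entry point; the import path and all model/rule/GGUF paths below are placeholder assumptions, not taken from this commit:

```python
# Hedged sketch of the injection flow the README describes; the rule-file
# and GGUF paths are placeholders, and the import path is an assumption.
import torch
from transformers import AutoConfig, AutoModelForCausalLM
from ktransformers.optimize.optimize import optimize_and_load_gguf

config = AutoConfig.from_pretrained("deepseek-ai/DeepSeek-V2", trust_remote_code=True)
with torch.device("meta"):  # build the module tree without allocating real weights
    model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)

# The single injection call: applies the YAML optimization rules (replacing
# matched modules with optimized kernels) and loads the weights from GGUF.
optimize_and_load_gguf(model, "optimize_rules/DeepSeek-V2-Chat.yaml",
                       "/path/to/gguf_dir", config)
```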

doc/SUMMARY.md

+14
@@ -0,0 +1,14 @@
+# Ktransformers
+
+[Introduction](./README.md)
+# DeepSeek
+- [DeepseekR1_V3_tutorial](en/DeepseekR1_V3_tutorial.md)
+- [deepseek-v2-injection](en/deepseek-v2-injection.md)
+- [Makefile_usage](en/makefile_usage.md)
+# Server
+- [Server](zh/api/server/README.md)
+- [Server](zh/api/server/server.md)
+- [Website](zh/api/server/website.md)
+- [Tabby](zh/api/server/tabby.md)
+# FAQ
+- [FAQ](en/FAQ.md)

doc/basic/note1.md

+1
@@ -0,0 +1 @@
+# basic-first20

doc/basic/note2.md

+1
@@ -0,0 +1 @@
+# basic-data_structure

doc/zh/api/server/README.md

+2
@@ -0,0 +1,2 @@
+# Server
+Still under construction... (may contain bugs and lack documentation)

ktransformers/operators/experts.py

+2
@@ -576,6 +576,8 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
         routing_weights_expert = routing_weights.to(self.experts.device) if isinstance(self.experts, KExpertsBase) else routing_weights_expert.cpu()
 
         shared_expert_output = self.shared_expert(hidden_states)
+        tmp = self.shared_expert_gate(hidden_states)
+        print("shared_expert_gate shape ", tmp.shape)
         shared_expert_output = (
             F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
         )
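
The two added lines print the shape of the shared-expert gate output, which relates to the Qwen2-MoE bug this commit fixes: a mis-oriented gate weight loaded from GGUF yields the wrong output shape here. For reference, a minimal standalone sketch of the gating computation being checked (dimensions and token count are illustrative, not from the model):

```python
# Sketch of Qwen2-MoE shared-expert gating; hidden_dim and the token count
# are illustrative. The gate maps hidden_dim -> 1, so its output must be
# [tokens, 1] to broadcast over the shared expert's output.
import torch
import torch.nn.functional as F
from torch import nn

hidden_dim = 8
shared_expert = nn.Linear(hidden_dim, hidden_dim, bias=False)
shared_expert_gate = nn.Linear(hidden_dim, 1, bias=False)

hidden_states = torch.randn(4, hidden_dim)           # [tokens, hidden_dim]
gate = F.sigmoid(shared_expert_gate(hidden_states))  # [tokens, 1]
shared_expert_output = gate * shared_expert(hidden_states)
print(gate.shape, shared_expert_output.shape)        # [4, 1] and [4, 8]
```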

ktransformers/operators/linear.py

+20-12
@@ -54,15 +54,15 @@ def __init__(
 
         self.has_bias = False
         self.dtype = torch.get_default_dtype()
-        # if orig_module is not None:
-        #     self.in_features = orig_module.in_features
-        #     self.out_features = orig_module.out_features
-        # else:
-        shape = self.gguf_loader.tensor_info[key + ".weight"]["shape"]
-        if len(shape) == 1:
-            print("Warning: orig_module is not set, but has in_features or out_features equals to 1, can't get in_features and out_features from GGUF")
-        self.in_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][0]
-        self.out_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][1]
+        if orig_module is not None:
+            self.in_features = orig_module.in_features
+            self.out_features = orig_module.out_features
+        else:
+            shape = self.gguf_loader.tensor_info[key + ".weight"]["shape"]
+            if len(shape) == 1:
+                print("Warning: orig_module is not set, but has in_features or out_features equals to 1, can't get in_features and out_features from GGUF")
+            self.in_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][0]
+            self.out_features = self.gguf_loader.tensor_info[key + ".weight"]["shape"][1]
 
     @abstractmethod
     def forward(self, x: torch.Tensor) -> torch.Tensor:

@@ -136,12 +136,19 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
     def load(self, w: dict | nn.Parameter | tuple | None = None, device: str|None = None):
         if device is None: device = self.device
         if w is None: w = self.load_weight(device=device)
+        # else: self.out_features = w.shape[0], self.in_features = w.shape[1]
 
         if isinstance(w, nn.Parameter):
-            self.w = w.to(dtype=self.dtype).T
+            try:
+                self.w = w.to(dtype=self.dtype).view(self.out_features, self.in_features).T
+            except:
+                self.w = w.to(dtype=self.dtype).T
             self.has_bias = False
         elif isinstance(w, tuple):
-            self.w = w[0].to(dtype=self.dtype).T
+            try:
+                self.w = w[0].to(dtype=self.dtype).view(self.out_features, self.in_features).T
+            except:
+                self.w = w[0].to(dtype=self.dtype).T
             self.bias = w[1].to(dtype=self.dtype)
             self.has_bias = True
         else:

@@ -187,7 +194,8 @@ def __init__(
     def load(self, w: dict | nn.Parameter | tuple | None = None, device: str|None = None):
         if device is None: device = self.device
         assert device.lower() != "cpu", "Marlin quantized linear only supports GPU device"
-        if w is None: w = self.load_weight(device=device)
+        if w is None:
+            w = self.load_weight(device=device)
 
         if isinstance(w, nn.Parameter):
             # pad weight
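
The new try/except in load() is the core of the Qwen2 fix: some weights loaded from GGUF don't arrive in the (out_features, in_features) orientation that nn.Linear expects, so the tensor is first re-viewed into that shape before transposing, falling back to a plain transpose when the view fails. A standalone sketch of the same idea, using an illustrative helper name that does not exist in the commit:

```python
# Illustrative helper mirroring the load() change above; orient_weight is a
# made-up name for this sketch, not a function in ktransformers.
import torch

def orient_weight(w: torch.Tensor, out_features: int, in_features: int,
                  dtype: torch.dtype = torch.float16) -> torch.Tensor:
    w = w.to(dtype=dtype)
    try:
        # Force the nn.Linear layout (out_features, in_features), then
        # transpose so forward() can compute x @ w directly.
        return w.view(out_features, in_features).T
    except RuntimeError:
        # Tensors that cannot be re-viewed (wrong element count or
        # non-contiguous memory) are assumed to already be laid out correctly.
        return w.T
```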
