Commit f544ab2 by winglian
Parent: 641e6f7

don't compile deepspeed or bitsandbytes from source (#837)

docker/Dockerfile CHANGED
@@ -21,9 +21,9 @@ WORKDIR /workspace/axolotl
 # If AXOLOTL_EXTRAS is set, append it in brackets
 RUN sed -i "s/torch==.*/torch==$PYTORCH_VERSION/" requirements.txt
 RUN if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
-        pip install -e .[flash-attn,$AXOLOTL_EXTRAS]; \
+        pip install -e .[deepspeed,flash-attn,$AXOLOTL_EXTRAS]; \
     else \
-        pip install -e .[flash-attn]; \
+        pip install -e .[deepspeed,flash-attn]; \
     fi

 # fix so that git fetch/pull from remote works
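
Note: with `deepspeed` now listed as a default extra, the final image pulls a prebuilt DeepSpeed wheel at install time instead of compiling one. A minimal sketch of building the image after this commit, assuming it is run from the repo root; the tag is illustrative, an empty AXOLOTL_EXTRAS simply takes the `else` branch above, and other build args (such as the base image tag) may also be needed:

    docker build -f docker/Dockerfile \
        --build-arg PYTORCH_VERSION=2.0.1 \
        --build-arg AXOLOTL_EXTRAS="" \
        -t axolotl:local .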
docker/Dockerfile-base CHANGED
@@ -10,8 +10,10 @@ ENV PATH="/root/miniconda3/bin:${PATH}"
 ARG PYTHON_VERSION="3.9"
 ARG PYTORCH_VERSION="2.0.1"
 ARG CUDA="118"
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"

 ENV PYTHON_VERSION=$PYTHON_VERSION
+ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST

 RUN apt-get update \
     && apt-get install -y wget git build-essential ninja-build git-lfs libaio-dev && rm -rf /var/lib/apt/lists/* \
@@ -29,45 +31,7 @@ WORKDIR /workspace
 RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
     python3 -m pip install --no-cache-dir -U torch==${PYTORCH_VERSION}+cu${CUDA} deepspeed-kernels --extra-index-url https://download.pytorch.org/whl/cu$CUDA

-FROM base-builder AS deepspeed-builder
-
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
-
-WORKDIR /workspace
-
-RUN git clone https://github.com/microsoft/DeepSpeed.git && \
-    cd DeepSpeed && \
-    MAX_CONCURRENCY=8 DS_BUILD_SPARSE_ATTN=0 DS_BUILD_OPS=1 DS_BUILD_EVOFORMER_ATTN=0 python3 setup.py bdist_wheel
-
-FROM base-builder AS bnb-builder
-
-WORKDIR /workspace
-ARG CUDA="118"
-ENV CUDA=$CUDA
-ARG MAX_JOBS="-1"
-ENV MAX_JOBS=$MAX_JOBS
-
-RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
-    cd bitsandbytes && \
-    CUDA_VERSION=$CUDA make cuda11x && \
-    python setup.py bdist_wheel
-
-FROM base-builder
-
-ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 9.0+PTX"
-ENV TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST
-
-RUN mkdir -p /workspace/builds
-COPY --from=bnb-builder /workspace/bitsandbytes /workspace/builds/bitsandbytes
-
-RUN mkdir -p /workspace/wheels/bitsandbytes
-COPY --from=deepspeed-builder /workspace/DeepSpeed/dist/deepspeed-*.whl wheels
-COPY --from=bnb-builder /workspace/bitsandbytes/dist/bitsandbytes-*.whl wheels
-COPY --from=bnb-builder /workspace/bitsandbytes/bitsandbytes/libbitsandbytes*.so wheels/bitsandbytes
-
-RUN pip3 install wheels/deepspeed-*.whl
-RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
-RUN git lfs install --skip-repo
-RUN pip3 install awscli && \
+RUN git lfs install --skip-repo && \
+    pip3 install awscli && \
     # The base image ships with `pydantic==1.8.2` which is not working
     pip3 install -U --no-cache-dir pydantic==1.10.10
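
The deleted stages compiled DeepSpeed and bitsandbytes wheels from source inside the base image; after this commit both are expected to arrive as prebuilt wheels via the axolotl extras and requirements.txt. A minimal sanity check against the finished axolotl image, assuming the illustrative `axolotl:local` tag from above and a GPU-enabled Docker runtime:

    docker run --rm --gpus all axolotl:local \
        python3 -c "import bitsandbytes, deepspeed; print(deepspeed.__version__)"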
examples/cerebras/btlm-ft.yml CHANGED
@@ -14,7 +14,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path: last_prepared_run
-val_set_size: 0.01
+val_set_size: 0.05

 adapter:
 lora_model_dir:
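
`val_set_size` is the fraction of the dataset held out for evaluation, so this commit bumps the example configs from a 1% to a 5% eval split (roughly 100 rows of the 2k-example alpaca test set instead of about 20). A minimal sketch of training with one of the updated configs, assuming the `axolotl.cli.train` entrypoint of this era:

    accelerate launch -m axolotl.cli.train examples/llama-2/lora.yml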
examples/cerebras/qlora.yml CHANGED
@@ -7,7 +7,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
examples/code-llama/13b/lora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
examples/code-llama/13b/qlora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
examples/code-llama/34b/lora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
examples/code-llama/34b/qlora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
examples/code-llama/7b/lora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
examples/code-llama/7b/qlora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
examples/falcon/config-7b-lora.yml CHANGED
@@ -12,7 +12,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca:chat
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter: lora
 lora_model_dir:
 sequence_len: 2048
examples/falcon/config-7b-qlora.yml CHANGED
@@ -18,7 +18,7 @@ datasets:
       - Chain-of-Thought/formatted_cot_data/gsm8k_train.json
     type: "alpaca:chat"
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 # enable QLoRA
 adapter: qlora
 lora_model_dir:
examples/falcon/config-7b.yml CHANGED
@@ -12,7 +12,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca:chat
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter:
 lora_model_dir:
 sequence_len: 2048
examples/gptj/qlora.yml CHANGED
@@ -7,7 +7,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 2048
examples/llama-2/fft_optimized.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./out

 sequence_len: 4096
examples/llama-2/gptq-lora.yml CHANGED
@@ -15,7 +15,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter: lora
 lora_model_dir:
 sequence_len: 4096
examples/llama-2/lora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
examples/llama-2/qlora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
examples/llama-2/relora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./relora-out

 adapter: qlora
examples/llama-2/tiny-llama.yml CHANGED
@@ -12,7 +12,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./lora-out

 sequence_len: 4096
examples/mistral/config.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./out

 sequence_len: 8192
examples/mistral/qlora.yml CHANGED
@@ -11,7 +11,7 @@ datasets:
   - path: mhenrichsen/alpaca_2k_test
     type: alpaca
 dataset_prepared_path: last_run_prepared
-val_set_size: 0.01
+val_set_size: 0.05
 output_dir: ./qlora-out

 adapter: qlora
examples/openllama-3b/qlora.yml CHANGED
@@ -9,7 +9,7 @@ datasets:
   - path: teknium/GPT4-LLM-Cleaned
     type: alpaca
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 adapter: qlora
 lora_model_dir:
 sequence_len: 1024
examples/xgen-7b/xgen-7b-8k-qlora.yml CHANGED
@@ -16,7 +16,7 @@ datasets:
       - openassistant_best_replies_train.jsonl
     type: "completion"
 dataset_prepared_path:
-val_set_size: 0.01
+val_set_size: 0.05
 # enable QLoRA
 adapter: qlora
 lora_model_dir:
requirements.txt CHANGED
@@ -3,7 +3,7 @@
 torch==2.0.1
 auto-gptq==0.4.2
 packaging
-peft @ git+https://github.com/huggingface/peft.git
+peft==0.6.0
 transformers @ git+https://github.com/huggingface/transformers.git@acc394c4f5e1283c19783581790b3dc3105a3697
 bitsandbytes>=0.41.1
 accelerate @ git+https://github.com/huggingface/accelerate@80da9cfb09bb3cc9f1b385cb55d6b90d025a5fd9
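
Pinning peft to the 0.6.0 release instead of git HEAD makes the dependency reproducible and drops the source checkout at install time. A minimal check of what the new pin resolves to; the expected output is simply what the pin implies:

    pip install peft==0.6.0
    python -c "import peft; print(peft.__version__)"  # should print 0.6.0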