winglian committed
Commit bbc5bc5
Parents: 4df9da7 48612f8

Merge pull request #108 from OpenAccess-AI-Collective/docker-gptq

.github/workflows/base.yml CHANGED
@@ -14,14 +14,17 @@ jobs:
     strategy:
       matrix:
         include:
-          - cuda: cu118
+          - cuda: "118"
             cuda_version: 11.8.0
-            cuda_version_bnb: "118"
-            pytorch: 2.0.0
-          - cuda: cu117
+            axolotl_extras:
+          - cuda: "117"
             cuda_version: 11.7.0
-            cuda_version_bnb: "117"
             pytorch: 1.13.1
+            axolotl_extras:
+          - cuda: "118"
+            cuda_version: 11.8.0
+            pytorch: 2.0.0
+            axolotl_extras: gptq
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -43,12 +46,12 @@ jobs:
           context: .
           file: ./docker/Dockerfile-base
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
+          tags: ${{ steps.metadata.outputs.tags }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
-            CUDA_VERSION_BNB=${{ matrix.cuda_version_bnb }}
             CUDA=${{ matrix.cuda }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
+            AXOLOTL_EXTRAS=${{ matrix.axolotl_extras }}
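Note: GitHub Actions expressions have no ternary operator, so the tag suffix above leans on the `cond && a || b` idiom (safe here because `'-'` is always truthy). A minimal shell sketch of the same tag-composition logic, with illustrative names:

```bash
# Sketch only: compose_tag and its arguments stand in for the
# ${{ ... }} values assembled in the workflow's `tags:` line.
compose_tag() {
  base="$1"; cuda="$2"; pytorch="$3"; extras="$4"
  tag="${base}-cu${cuda}-${pytorch}"
  # Mirror ${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}:
  # append "-<extras>" only when extras is non-empty.
  if [ -n "$extras" ]; then
    tag="${tag}-${extras}"
  fi
  echo "$tag"
}

compose_tag main-base 118 2.0.0 gptq   # -> main-base-cu118-2.0.0-gptq
compose_tag main-base 117 1.13.1 ""    # -> main-base-cu117-1.13.1
```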
.github/workflows/main.yml CHANGED
@@ -16,9 +16,15 @@ jobs:
           - cuda: cu118
             cuda_version: 11.8.0
             pytorch: 2.0.0
+            axolotl_extras:
+          - cuda: cu118
+            cuda_version: 11.8.0
+            pytorch: 2.0.0
+            axolotl_extras: gptq
           - cuda: cu117
             cuda_version: 11.7.0
             pytorch: 1.13.1
+            axolotl_extras:
     runs-on: self-hosted
     steps:
       - name: Checkout
@@ -40,10 +46,10 @@ jobs:
         with:
           context: .
           build-args: |
-            BASE_TAG=${{ github.ref_name }}-base-${{ matrix.cuda }}-${{ matrix.pytorch }}
+            BASE_TAG=${{ github.ref_name }}-base-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           file: ./docker/Dockerfile
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
+          tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
@@ -57,9 +63,15 @@ jobs:
           - cuda: cu118
             cuda_version: 11.8.0
             pytorch: 2.0.0
+            axolotl_extras:
+          - cuda: cu118
+            cuda_version: 11.8.0
+            pytorch: 2.0.0
+            axolotl_extras: gptq
           - cuda: cu117
             cuda_version: 11.7.0
             pytorch: 1.13.1
+            axolotl_extras:
     runs-on: self-hosted
     steps:
       - name: Checkout
@@ -81,10 +93,10 @@ jobs:
         with:
          context: .
          build-args: |
-            BASE_TAG=${{ github.ref_name }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
+            BASE_TAG=${{ github.ref_name }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
          file: ./docker/Dockerfile-runpod
          push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}
+          tags: ${{ steps.metadata.outputs.tags }}-${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
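For illustration, assuming the metadata step tags the default branch as `main` and the images land under the `winglian` Docker Hub namespace, the matrix above would publish tags along these lines:

```bash
# Hypothetical resulting tags: an empty axolotl_extras adds no suffix,
# while the gptq entry gains a "-gptq" suffix.
docker pull winglian/axolotl:main-cu118-2.0.0
docker pull winglian/axolotl:main-cu118-2.0.0-gptq
docker pull winglian/axolotl:main-cu117-1.13.1
```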
README.md CHANGED
@@ -32,7 +32,7 @@
 ```bash
 git clone https://github.com/OpenAccess-AI-Collective/axolotl
 
-pip3 install -e .[int4]
+pip3 install -e .
 
 accelerate config
 
@@ -59,9 +59,9 @@ accelerate launch scripts/finetune.py examples/lora-openllama-3b/config.yml \
 1. Install python **3.9**
 
 2. Install python dependencies with ONE of the following:
-    - `pip3 install -e .[int4]` (recommended)
-    - `pip3 install -e .[int4_triton]`
-    - `pip3 install -e .`
+    - `pip3 install -e .` (recommended, supports QLoRA, no gptq/int4 support)
+    - `pip3 install -e .[gptq]` (next best if you don't need QLoRA, but want to use gptq)
+    - `pip3 install -e .[gptq_triton]`
 
 ### Dataset
 
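A quick sanity check after picking one of the install flavors; the distribution name below follows the `setup.py` extras in this PR, so treat this as a sketch:

```bash
pip3 install -e .[gptq]
# Resolves only when a gptq extra was installed:
pip3 show alpaca_lora_4bit
```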
 
docker/Dockerfile CHANGED
@@ -2,19 +2,26 @@ ARG BASE_TAG=main-base
 FROM winglian/axolotl-base:$BASE_TAG
 
 ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+ARG AXOLOTL_EXTRAS=""
 
 RUN apt-get update && \
     apt-get install -y vim curl
 
 WORKDIR /workspace
 
-# The base image ships with `pydantic==1.8.2` which is not working
-RUN python3 -m pip install -U --no-cache-dir pydantic
+RUN pip3 install --force-reinstall "peft @ git+https://github.com/huggingface/peft.git@main" \
+        "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
+        "transformers @ git+https://github.com/huggingface/transformers.git@main"
 
 RUN mkdir axolotl
 COPY . axolotl/
+# If AXOLOTL_EXTRAS is set, append it in brackets
 RUN cd axolotl && \
-    pip install -e .[int4]
+    if [ "$AXOLOTL_EXTRAS" != "" ] ; then \
+        pip install -e .[$AXOLOTL_EXTRAS]; \
+    else \
+        pip install -e .; \
+    fi
 
 # helper for huggingface-login cli
 RUN git config --global credential.helper store
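A local build sketch showing how the new build-arg drives the conditional install; the `BASE_TAG` value and image tags here are illustrative:

```bash
# Bake the gptq extra into the image:
docker build -f docker/Dockerfile \
  --build-arg BASE_TAG=main-base-cu118-2.0.0 \
  --build-arg AXOLOTL_EXTRAS=gptq \
  -t axolotl:dev-gptq .

# AXOLOTL_EXTRAS defaults to "", which takes the plain `pip install -e .` branch:
docker build -f docker/Dockerfile -t axolotl:dev .
```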
docker/Dockerfile-base CHANGED
@@ -9,7 +9,7 @@ ENV PATH="/root/miniconda3/bin:${PATH}"
 
 ARG PYTHON_VERSION="3.9"
 ARG PYTORCH="2.0.0"
-ARG CUDA="cu118"
+ARG CUDA="118"
 
 ENV PYTHON_VERSION=$PYTHON_VERSION
 
@@ -29,7 +29,7 @@ ENV PATH="/root/miniconda3/envs/py${PYTHON_VERSION}/bin:${PATH}"
 WORKDIR /workspace
 
 RUN python3 -m pip install --upgrade pip && pip3 install packaging && \
-    python3 -m pip install --no-cache-dir -U torch==${PYTORCH} torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA
+    python3 -m pip install --no-cache-dir -U torch==${PYTORCH} torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu$CUDA
 
 
 FROM base-builder AS flash-attn-builder
@@ -61,12 +61,12 @@ RUN git clone https://github.com/microsoft/DeepSpeed.git && \
 FROM base-builder AS bnb-builder
 
 WORKDIR /workspace
-ARG CUDA_VERSION_BNB="118"
-ENV CUDA_VERSION_BNB=$CUDA_VERSION_BNB
+ARG CUDA="118"
+ENV CUDA=$CUDA
 
 RUN git clone https://github.com/TimDettmers/bitsandbytes.git && \
     cd bitsandbytes && \
-    CUDA_VERSION=$CUDA_VERSION_BNB make cuda11x && \
+    CUDA_VERSION=$CUDA make cuda11x && \
     python setup.py bdist_wheel
 
 FROM base-builder
@@ -93,9 +93,6 @@ COPY --from=flash-attn-builder /workspace/flash-attention/csrc/layer_norm/dist/d
 RUN pip3 install wheels/deepspeed-*.whl wheels/flash_attn-*.whl wheels/fused_dense_lib-*.whl wheels/xentropy_cuda_lib-*.whl wheels/rotary_emb-*.whl wheels/dropout_layer_norm-*.whl
 RUN cd /workspace/builds/bitsandbytes && python3 setup.py install
 RUN git lfs install --skip-repo
-RUN pip3 install "peft @ git+https://github.com/huggingface/peft.git@main" \
-        "accelerate @ git+https://github.com/huggingface/accelerate.git@main" \
-        "transformers @ git+https://github.com/huggingface/transformers.git@main" && \
-    pip3 install awscli && \
+RUN pip3 install awscli && \
     # The base image ships with `pydantic==1.8.2` which is not working
     pip3 install -U --no-cache-dir pydantic
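After the rename, a single `CUDA` arg feeds both the PyTorch wheel index (`cu$CUDA`) and the bitsandbytes build (`CUDA_VERSION=$CUDA`). An illustrative local build for the CUDA 11.7 / PyTorch 1.13.1 combination, using the ARG names visible in this file:

```bash
docker build -f docker/Dockerfile-base \
  --build-arg CUDA=117 \
  --build-arg PYTORCH=1.13.1 \
  -t axolotl-base:dev-cu117-1.13.1 .
```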
scripts/setup-runpod.sh DELETED
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-export WANDB_MODE=offline
-export WANDB_CACHE_DIR=/workspace/data/wandb-cache
-mkdir -p $WANDB_CACHE_DIR
-
-mkdir -p /workspace/data/huggingface-cache/{hub,datasets}
-export HF_DATASETS_CACHE="/workspace/data/huggingface-cache/datasets"
-export HUGGINGFACE_HUB_CACHE="/workspace/data/huggingface-cache/hub"
-export TRANSFORMERS_CACHE="/workspace/data/huggingface-cache/hub"
-export NCCL_P2P_DISABLE=1
-
-nvidia-smi
-num_gpus=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
-gpu_indices=$(seq 0 $((num_gpus - 1)) | paste -sd "," -)
-export CUDA_VISIBLE_DEVICES=$gpu_indices
-echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
-
-apt-get update
-apt-get install -y build-essential ninja-build vim git-lfs
-git lfs install
-pip3 install --force-reinstall https://download.pytorch.org/whl/nightly/cu117/torch-2.0.0.dev20230301%2Bcu117-cp38-cp38-linux_x86_64.whl --index-url https://download.pytorch.org/whl/nightly/cu117
-if [ -z "${TORCH_CUDA_ARCH_LIST}" ]; then # only set this if not set yet
-    # this covers most common GPUs that the installed version of pytorch supports
-    # python -c "import torch; print(torch.cuda.get_arch_list())"
-    export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
-fi
-
-# install flash-attn and deepspeed from pre-built wheels for this specific container b/c these take forever to install
-mkdir -p /workspace/wheels
-cd /workspace/wheels
-curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
-curl -L -O https://github.com/OpenAccess-AI-Collective/axolotl/raw/wheels/wheels/flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
-pip install deepspeed-0.9.2%2B7ddc3b01-cp38-cp38-linux_x86_64.whl
-pip install flash_attn-1.0.4-cp38-cp38-linux_x86_64.whl
-pip install "peft @ git+https://github.com/huggingface/peft.git@main" --force-reinstall --no-dependencies
-
-cd /workspace/
-git clone https://github.com/OpenAccess-AI-Collective/axolotl.git
-cd axolotl
-pip install -e .[int4]
-mkdir -p ~/.cache/huggingface/accelerate/
-cp configs/accelerate/default_config.yaml ~/.cache/huggingface/accelerate/default_config.yaml
setup.py CHANGED
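The extras rename below maps straight onto pip's bracket syntax. One caveat worth knowing: zsh globs square brackets, so quote the argument there:

```bash
pip3 install -e .[gptq]            # bash
pip3 install -e '.[gptq_triton]'   # zsh-safe quoting
```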
@@ -19,10 +19,10 @@ setup(
19
  packages=find_packages(),
20
  install_requires=install_requires,
21
  extras_require={
22
- "int4": [
23
  "alpaca_lora_4bit @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
24
  ],
25
- "int4_triton": [
26
  "alpaca_lora_4bit[triton] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
27
  ],
28
  "extras": [
 
19
  packages=find_packages(),
20
  install_requires=install_requires,
21
  extras_require={
22
+ "gptq": [
23
  "alpaca_lora_4bit @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
24
  ],
25
+ "gptq_triton": [
26
  "alpaca_lora_4bit[triton] @ git+https://github.com/winglian/alpaca_lora_4bit.git@setup_pip",
27
  ],
28
  "extras": [