import torch


def test_lora_layer_replacement(lit_llama):
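    """Building LLaMA inside the ``lora`` context manager should replace every attention block with the LoRA variant."""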
    from lit_llama.lora import lora, CausalSelfAttention as LoRACausalSelfAttention
    from lit_llama.model import LLaMA, LLaMAConfig
    
    config = LLaMAConfig()
    config.n_layer = 2
    config.n_head = 4
    config.n_embd = 8
    config.block_size = 8
    config.vocab_size = 8

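    # while the context manager is active, the model is built with LoRA-enabled attention layers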
    with lora(r=8, alpha=8, dropout=0.1):
        model = LLaMA(config)

    assert isinstance(model.transformer.h[0].attn, LoRACausalSelfAttention)
    assert isinstance(model.transformer.h[1].attn, LoRACausalSelfAttention)


def test_lora_merge_unmerge(lit_llama):
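    """Merging (eval) and unmerging (train) the LoRA delta should round-trip the base weight
    and stay idempotent when eval/train is called repeatedly."""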
    from lit_llama.lora import lora, mark_only_lora_as_trainable
    from lit_llama.model import LLaMA, LLaMAConfig
    
    config = LLaMAConfig(n_layer=1, n_head=2, n_embd=8, block_size=8, vocab_size=8)

    with lora(r=8, alpha=8, dropout=0.1):
        model = LLaMA(config)
    
    initial_weight = model.transformer.h[0].attn.c_attn.weight.clone()
    model.train()
    assert torch.equal(model.transformer.h[0].attn.c_attn.weight, initial_weight)

    # perform an update to the LoRA weights
    mark_only_lora_as_trainable(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
    model(torch.randint(0, 8, size=(2, 4), dtype=torch.int64)).sum().backward()
    optimizer.step()
    optimizer.zero_grad()
    # the frozen base weight remains unchanged (only the LoRA matrices A and B are updated)
    assert torch.equal(model.transformer.h[0].attn.c_attn.weight, initial_weight)

    # merging (on eval) and then unmerging (on train) should cancel each other out
    weight_before = model.transformer.h[0].attn.c_attn.weight.clone()
    model.eval()
    assert not torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_before)
    model.train()
    # note: numerically, `W + (A * B) - (A * B) == W` does not hold exactly in floating point, hence `allclose`
    assert torch.allclose(model.transformer.h[0].attn.c_attn.weight, weight_before)

    # calling eval/train multiple times in a row should not merge/unmerge multiple times
    model.eval()
    assert model.transformer.h[0].attn.c_attn.merged
    weight_after = model.transformer.h[0].attn.c_attn.weight.clone()
    model.eval()
    model.eval()
    assert torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_after)
    model.train()
    assert not model.transformer.h[0].attn.c_attn.merged
    weight_after = model.transformer.h[0].attn.c_attn.weight.clone()
    model.train()
    model.train()
    assert torch.equal(model.transformer.h[0].attn.c_attn.weight, weight_after)