{ "architectures": [ "LlamaForCausalLM" ], "model_type": "llama", "embedding_dim": 2, "hidden_size": 100, "output_size": 32768, "num_layers": 2, "activation_function": "tanh", "initializer_range": 0.02 }