#!/bin/bash
# train.sh - reproducible fine-tuning config
#
# 4-bit quantized LoRA (QLoRA-style) fine-tuning of Llama-3.2-3B-Instruct
# on case-law chunks. Effective batch size: 4 per device x 4 accumulation
# steps = 16 per GPU per optimizer step (times the number of GPUs globally).

set -euo pipefail  # fail fast on errors, unset variables, and broken pipes

accelerate launch src/train_pt.py \
  --model_name_or_path meta-llama/Llama-3.2-3B-Instruct \
  --train_file data/caselaw_chunks.jsonl \
  --output_dir outputs/lora_caselaw \
  --per_device_train_batch_size 4 \
  --gradient_accumulation_steps 4 \
  --num_train_epochs 3 \
  --learning_rate 2e-4 \
  --lora_r 8 \
  --lora_alpha 16 \
  --lora_dropout 0.05 \
  --quantization_bit 4
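
# How to launch (a sketch; the accelerate setup below is an assumption, not
# part of the original command). Configure accelerate once per machine, then
# run the script, optionally pinning it to specific GPUs:
#
#   accelerate config          # choose GPU count / mixed precision interactively
#   CUDA_VISIBLE_DEVICES=0 bash train.sh
#
# After training, the adapter should land under outputs/lora_caselaw. The
# exact file names depend on how src/train_pt.py saves checkpoints; a
# PEFT-style adapter_model.safetensors is typical, but that is an assumption.
ls -lh outputs/lora_caselaw  # quick sanity check that the run produced output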