import os
os.environ["TORCHDYNAMO_DISABLE"] = "1"  # disable torch.compile / TorchDynamo tracing

import torch
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import (
    LoraConfig,
    get_peft_model,
    prepare_model_for_kbit_training,
)
from trl import SFTTrainer

# ----------------------------
# Model configuration
# ----------------------------
MODEL_NAME = "Qwen/Qwen2.5-14B-Instruct"

print("=== Starting fine-tuning script ===")

print(f"{80 * '_'}\n[1/7] Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME,
    trust_remote_code=True,
)

# Ensure a padding token is defined and cap sequences at 1024 tokens
tokenizer.pad_token = tokenizer.eos_token
tokenizer.model_max_length = 1024

print("Tokenizer loaded and configured.")

print(f"{80 * '_'}\n[2/7] Loading model in 4-bit mode (QLoRA)...")
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype=torch.float16,  # dtype of the non-quantized weights
    trust_remote_code=True,
)
print("Model loaded.")

print(f"{80 * '_'}\n[3/7] Preparing model for k-bit training...")
model = prepare_model_for_kbit_training(model)
print("Model prepared for k-bit training.")

# ----------------------------
# LoRA configuration
# ----------------------------
print(f"{80 * '_'}\n[4/7] Configuring LoRA adapters...")
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    # Adapt every attention projection (q/k/v/o) and every MLP projection
    # (gate/up/down) of the Qwen2-style decoder blocks
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],
)

model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
print("LoRA adapters attached to the model.")

# ----------------------------
# Dataset loading
# ----------------------------
print(f"{80 * '_'}\n[5/7] Loading dataset from JSON file...")
dataset = load_dataset(
    "json",
    data_files="traductions.json",
)
print(f"Dataset loaded with {len(dataset['train'])} samples.")

print("Formatting dataset for Ukrainian → French translation...")

def format_prompt(example):
    prompt = (
        "Translate the following Ukrainian text into French.\n\n"
        f"Ukrainian: {example['text']}\n"
        f"French: {example['translation']}"
    )
    return {"text": prompt}

dataset = dataset.map(format_prompt, remove_columns=dataset["train"].column_names)
print("Dataset formatting completed.")

# ----------------------------
# Training arguments
# ----------------------------
print(f"{80 * '_'}\n[6/7] Initializing training arguments...")
training_args = TrainingArguments(
    output_dir="./qwen-uk-fr-lora",
    # Effective batch size: 1 sample/device x 8 accumulation steps = 8
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    num_train_epochs=3,

    # Mixed precision stays off: the LoRA weights train in float32 while the
    # frozen base weights remain 4-bit
    fp16=False,
    bf16=False,

    logging_steps=10,
    save_steps=500,
    save_total_limit=2,

    # Paged 32-bit AdamW (bitsandbytes); optimizer states can be paged out
    # to avoid GPU memory spikes
    optim="paged_adamw_32bit",

    report_to="none",
)

print("Training arguments ready.")

# ----------------------------
# Trainer
# ----------------------------
print("Initializing SFTTrainer...")
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset["train"],
    processing_class=tokenizer,
    args=training_args,
)
print("Trainer initialized.")

# ----------------------------
# Train
# ----------------------------
print(f"{80 * '_'}\n[7/7] Starting training...")
trainer.train()
print("Training completed successfully.")

# ----------------------------
# Save LoRA adapter
# ----------------------------
print("Saving LoRA adapter and tokenizer...")
trainer.model.save_pretrained("./qwen-uk-fr-lora")
tokenizer.save_pretrained("./qwen-uk-fr-lora")

print("=== Fine-tuning finished ===")
print("LoRA adapter saved in ./qwen-uk-fr-lora")