Browse Source

fine tuning

main
Alex 13 hours ago
parent
commit
71e595a966
  1. 2
      Finetunning/finetunning.py
  2. 2
      Finetunning/validation.py
  3. 2
      Traduction/main.py

2
Finetunning/finetunning.py

@@ -145,7 +145,7 @@ training_args = TrainingArguments(
per_device_train_batch_size=1,
gradient_accumulation_steps=16,
learning_rate=1e-4,
num_train_epochs=3,
num_train_epochs=2,
max_steps=1000,
fp16=False, # ⚠ disable AMP

2
Finetunning/validation.py

@@ -8,7 +8,7 @@ from nltk.translate.bleu_score import corpus_bleu
# Configuration
# ----------------------------
BASE_MODEL = "Qwen/Qwen2.5-7B-Instruct" # base model
LORA_DIR = "./qwen2.5-7b-uk-fr-lora" # fine-tuned LoRA
LORA_DIR = "./qwen2.5-7b-uk-fr-lora-2epoch" # fine-tuned LoRA
VALIDATION_FILE = "validation.jsonl" # small validation subset
MAX_INPUT_LENGTH = 1024
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

2
Traduction/main.py

@@ -11,7 +11,6 @@ from reportlab.pdfbase.ttfonts import TTFont
import os, time
# Configuration
DEBUG = True
PDF_PATH = "Traduction/TaniaBorecMemoir(Ukr).pdf"
OLLAMA_MODEL = "traductionUkrainienVersFrancais:latest"
OLLAMA_URL = "http://localhost:11434/api/generate"
@@ -174,7 +173,6 @@ def load_checkpoint():
return json.load(f)
return {"last_processed_index": -1, "results": {}}
# Sauvegarde le checkpoint
# Sauvegarde le checkpoint
def save_checkpoint(last_index, results):
# Trier les clés du dictionnaire results

Loading…
Cancel
Save