import os

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
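
# Read the Hugging Face access token from the environment; Meta-Llama-3-8B is a
# gated model, so a token with approved access is required to download it.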
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
if huggingface_token is None:
    # Exit with a non-zero status and a clear message instead of silently quitting.
    raise SystemExit("Error: the HUGGINGFACE_TOKEN environment variable is not set.")
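
# Load the tokenizer and model weights from the Hugging Face Hub.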
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B", token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B", token=huggingface_token)
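
# Note: in full float32 precision the 8B model needs roughly 32 GB of memory.
# Optionally, passing torch_dtype=torch.float16 and device_map="auto" to
# from_pretrained (which assumes `import torch` and the accelerate package)
# halves memory use and places layers on an available GPU.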
def translate_code(input_code, prompt=""):
    # Prepend the instruction prompt to the code to be translated.
    input_text = f"{prompt}\n\n{input_code}"
    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)
    # do_sample=True is required for temperature to take effect; max_new_tokens
    # bounds the generated text itself, whereas max_length counted prompt + output
    # and could leave no room to generate. Llama has no pad token, so reuse EOS.
    output = model.generate(
        input_ids,
        max_new_tokens=1024,
        do_sample=True,
        temperature=0.7,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    translated_code = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return translated_code
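
# Illustrative call, assuming the model follows the instruction prompt:
#   translate_code("print('hello')", "Translate the following Python code to JavaScript:")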
gr.Interface(
    fn=translate_code,
    inputs=[
        gr.Textbox(label="Input code", lines=10),
        gr.Textbox(label="Prompt (e.g. target-language instruction)"),
    ],
    outputs=gr.Textbox(label="Translated code", lines=10),
    title="AI Code Translator",
    description="Translate your code using the Meta-Llama-3-8B model.",
    theme="soft",  # "compact" is a legacy Gradio theme name; "soft" is a current built-in
).launch()