Developed by: Mahmoud Ibrahim
How to use:
!pip install transformers bitsandbytes accelerate  # accelerate is required for 4-bit loading
from transformers import AutoTokenizer, AutoModelForCausalLM
from IPython.display import Markdown
import textwrap
# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("MahmoudIbrahim/Mistral_12b_Arabic")
model = AutoModelForCausalLM.from_pretrained("MahmoudIbrahim/Mistral_12b_Arabic", load_in_4bit=True, device_map="auto")
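Newer transformers releases prefer passing a BitsAndBytesConfig instead of the bare load_in_4bit argument. The following equivalent loading sketch is an assumption (not part of the original card) and presumes a recent transformers version with bitsandbytes and accelerate installed:
from transformers import BitsAndBytesConfig

# 4-bit quantization config; equivalent to load_in_4bit=True above
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "MahmoudIbrahim/Mistral_12b_Arabic",
    quantization_config=bnb_config,
    device_map="auto",  # place the quantized weights on the available GPU
)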
alpaca_prompt = """ููู
ุง ููู ุชุนููู
ุงุช ุชุตู ู
ูู
ุฉุ ุฅูู ุฌุงูุจ ู
ุฏุฎู ูููุฑ ุณูุงูุงู ุฅุถุงููุงู. ุงูุชุจ ุงุณุชุฌุงุจุฉ ุชููู
ู ุงูุทูุจ ุจุดูู ู
ูุงุณุจ.
### ุงูุชุนููู
ุงุช:
{}
### ุงูุงุณุชุฌุงุจุฉ:
{}"""
# Format the prompt with the instruction and an empty output placeholder
formatted_prompt = alpaca_prompt.format(
    # Instruction. English: "How can the Egyptian government and society as a whole
    # strengthen the country's ability to achieve sustainable development?"
    "كيف يمكن للحكومة المصرية والمجتمع ككل أن يعززوا من قدرة البلاد على تحقيق التنمية المستدامة؟",
    ""  # Leave the output blank for generation
)
# Tokenize the formatted string and move it to the same device as the model (GPU when 4-bit loading is used)
input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt").to(model.device)
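If you also want the attention mask (not shown in the original snippet), a possible variant is to call the tokenizer directly; the resulting dict can then be unpacked into the generate() call below as model.generate(**inputs, ...):
# Hypothetical alternative: returns input_ids plus attention_mask in one BatchEncoding
inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)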
# Render generated text as an indented Markdown blockquote
def to_markdown(text):
    text = text.replace('•', '*')
    return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))
# Generate text
output = model.generate(
    input_ids,
    max_length=128,           # Adjust the maximum length as needed
    num_return_sequences=1,   # Number of generated responses
    no_repeat_ngram_size=2,   # Prevent repeated bigrams
    do_sample=True,           # Enable sampling so top_k / top_p / temperature take effect
    top_k=50,                 # Keep only the 50 most likely tokens at each step
    top_p=0.9,                # Nucleus sampling
    temperature=0.7,          # Control creativity level
)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
to_markdown(generated_text)
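The decoded string contains the prompt as well as the completion. If you only want the model's answer, one possible variant (an assumption, not part of the original card) is to decode just the newly generated tokens:
# Decode only the tokens produced after the prompt
answer = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
to_markdown(answer)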
The model response:
Model tree for MahmoudIbrahim/Mistral_Nemo_Arabic
Base model: unsloth/Mistral-Nemo-Base-2407-bnb-4bit