WilhelmT committed · Commit 23e9529 · verified · 1 Parent(s): 1afce32

Update README.md

Files changed (1)
  1. README.md +13 -10
README.md CHANGED
@@ -88,12 +88,13 @@ from embedl.models.vllm import LLM
 
 model_id = "embedl/Llama-3.2-1B-Instruct-FlashHead"
 
-sampling = SamplingParams(max_tokens=128, temperature=0.0)
-llm = LLM(model=model_id, trust_remote_code=True)
-
-prompt = "Write a haiku about coffee."
-output = llm.generate([prompt], sampling)
-print(output[0].outputs[0].text)
+if __name__ == "__main__":
+    sampling = SamplingParams(max_tokens=128, temperature=0.0)
+    llm = LLM(model=model_id, trust_remote_code=True)
+
+    prompt = "Write a haiku about coffee."
+    output = llm.generate([prompt], sampling)
+    print(output[0].outputs[0].text)
 ```
 
 ---
@@ -108,11 +109,13 @@ import asyncio
 from embedl.models.vllm.demo import run_repl
 
 model_id = "embedl/Llama-3.2-1B-Instruct-FlashHead"
-asyncio.run(
-    run_repl(
-        model=model_id
+
+if __name__ == "__main__":
+    asyncio.run(
+        run_repl(
+            model=model_id
+        )
     )
-)
 ```
 ---
 
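
For context, a minimal sketch of how the offline-generation snippet reads once the guard is in place. The `from vllm import SamplingParams` line is an assumption (the standard vLLM location for that class); the hunk header only shows `LLM` being imported from `embedl.models.vllm`.

```python
# Sketch of the README's offline-generation example after this change.
# Assumption: SamplingParams is imported from vllm (standard vLLM API);
# the diff itself only shows the LLM import from embedl.models.vllm.
from vllm import SamplingParams

from embedl.models.vllm import LLM

model_id = "embedl/Llama-3.2-1B-Instruct-FlashHead"

if __name__ == "__main__":
    # Keep engine construction and generation under the main guard so the
    # snippet stays import-safe if worker processes re-import the script.
    sampling = SamplingParams(max_tokens=128, temperature=0.0)
    llm = LLM(model=model_id, trust_remote_code=True)

    prompt = "Write a haiku about coffee."
    output = llm.generate([prompt], sampling)
    print(output[0].outputs[0].text)
```

The interactive REPL example gets the same treatment: `asyncio.run(run_repl(model=model_id))` moves under `if __name__ == "__main__":`, presumably so the examples stay safe to import when vLLM spawns worker processes.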