Mirror of https://github.com/openai/whisper.git — synced 2025-11-24 14:35:57 +00:00
Commit: Wrap model in HPU graph + update Dockerfile packages

This commit is contained in:
  parent: 50adb7654b
  commit: f6ccd01567
@@ -42,5 +42,8 @@ COPY requirements.txt /root/whisper/requirements.txt

 # Install Python packages from requirements.txt
 RUN pip install --upgrade pip \
-    && pip install optimum[habana]==1.13.1 \
-    && pip install -r requirements.txt
+    && pip install optimum-habana==1.14.1 \
+    && pip install transformers==4.33.0 \
+    && pip install huggingface-hub==0.26.2 \
+    && pip install tiktoken==0.8.0 \
+    && pip install numba==0.60.0
@@ -165,10 +165,14 @@ def load_model(

     if device == "hpu":
         from habana_frameworks.torch.utils.library_loader import load_habana_module
-        from habana_frameworks.torch.hpu import wrap_in_hpu_graph

         load_habana_module()
         if torch.hpu.is_available():
-            return wrap_in_hpu_graph(model)
+            from habana_frameworks.torch.hpu import wrap_in_hpu_graph
+
+            model = model.eval().to(device)
+
+            model = wrap_in_hpu_graph(model)
+            return model

     return model.to(device)
Loading…
x
Reference in New Issue
Block a user