fix: stablelm

This commit is contained in:
0xMRTT 2023-05-24 22:00:28 +02:00
parent f24f3724bf
commit 1b5a91232a
Signed by: 0xMRTT
GPG Key ID: 910B287304120902
5 changed files with 16 additions and 6 deletions


@@ -2,6 +2,11 @@
 # chmod +x flatpak-pip-generator
 # ./flatpak-pip-generator --requirements-file=requirements.txt --output pypi-dependencies
+# use https://johannesjh.github.io/req2flatpak/main/cli.html
+# 1. pip-compile -o r.txt requirements.txt
+# 2. ./req2flatpak.py --requirements-file requirements.txt --target-platforms 310-x86_64 310-aarch64 > build-aux/pypi-dependencies.json
 Pygments
 baichat_py
 googlebardpy


@@ -15,7 +15,7 @@ from .hfdialogpt import HuggingFaceDialoGPTLargeProvider
 from .hfgpt2large import HuggingFaceGPT2LargeProvider
 from .hfgpt2xl import HuggingFaceGPT2XLProvider
 from .stablelm import StableLMProvider
-# from .starcoder import StarCoderProvider
+from .starcoder import StarCoderProvider

 PROVIDERS = {
     "alpacalora": AlpacaLoRAProvider,


@@ -19,12 +19,13 @@ class BaseGradioProvider(BavarderProvider):
         self.client = Client(self.url)
     def ask(self, prompt):
         print("ASKING " + "="*100)
         try:
-            result = client.predict(
+            response = self.client.predict(
                 prompt,  # str representing string value in 'Chat Message Box' Textbox component
                 "null",  # str representing filepath to JSON file in 'parameter_3' Chatbot component
                 fn_index=0
             )
+            print(response)
         except Exception as e:
             print(e)
             self.win.banner.props.title = str(e)
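For context, a minimal self-contained sketch of the gradio_client call pattern this method wraps; the Space URL is the one the StableLM provider used before this commit, and the prompt is illustrative:

    from gradio_client import Client

    client = Client("https://stabilityai-stablelm-tuned-alpha-chat.hf.space/")
    response = client.predict(
        "Hello, who are you?",  # str value for the 'Chat Message Box' Textbox component
        "null",                 # str filepath to a JSON file for the 'parameter_3' Chatbot component
        fn_index=0,             # call the first registered endpoint on the Space
    )
    print(response)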


@@ -1,6 +1,6 @@
-from .gradio import BaseGradioProvider
+from .huggingface import BaseHFProvider

-class StableLMProvider(BaseGradioProvider):
+class StableLMProvider(BaseHFProvider):
     name = "StableLM"
     slug = "stablelm"
-    url = "https://stabilityai-stablelm-tuned-alpha-chat.hf.space/"
+    model = "stabilityai/stablelm-tuned-alpha-3b"


@@ -13,14 +13,18 @@ class BaseTransformerProvider(BavarderProvider):
     slug = None
     checkpoint = None
     device = "cpu"
+    is_setup = False

     def __init__(self, win, app, *args, **kwargs):
         super().__init__(win, app, *args, **kwargs)

+    def setup(self):
         self.tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
         self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint).to(self.device)

     def ask(self, prompt):
+        if not self.is_setup:
+            self.setup()
         try:
             inputs = tokenizer.encode(prompt, return_tensors="pt").to(self.device)
             outputs = model.generate(inputs)
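A minimal self-contained sketch of the lazy-initialisation pattern this hunk introduces, assuming setup() also flips is_setup (the hunk is cut off before any such line) and that the tokenizer and model are read back through self rather than as the bare names the context lines show; the class name is illustrative:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    class LazyCausalLM:
        checkpoint = "stabilityai/stablelm-tuned-alpha-3b"  # any causal-LM checkpoint
        device = "cpu"
        is_setup = False

        def setup(self):
            # Load the tokenizer and weights once, on first use rather than at startup.
            self.tokenizer = AutoTokenizer.from_pretrained(self.checkpoint)
            self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint).to(self.device)
            self.is_setup = True

        def ask(self, prompt):
            if not self.is_setup:
                self.setup()
            inputs = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)
            outputs = self.model.generate(inputs)
            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)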