Added a method to detect reasoning models and turn reasoning off.

This commit is contained in:
2026-03-20 04:47:33 -04:00
parent 83bd05ff28
commit 1f5d20b558
4 changed files with 168 additions and 69 deletions

View File

@@ -11,15 +11,35 @@ init python:
import re
EMOTION_REGEX = re.compile(r"EMOTION:\w+")
EMOTION_TOKEN_REGEX = re.compile(rf"{EMOTION_REGEX.pattern} ?")
EMOJI_REGEX = re.compile(
"["
"\U0001f1e6-\U0001f1ff" # flags
"\U0001f300-\U0001f5ff" # symbols and pictographs
"\U0001f600-\U0001f64f" # emoticons
"\U0001f680-\U0001f6ff" # transport and map
"\U0001f900-\U0001f9ff" # supplemental symbols and pictographs
"\U0001fa70-\U0001faff" # symbols and pictographs extended
"\U00002702-\U000027b0" # dingbats
"\U0001f3fb-\U0001f3ff" # skin tone modifiers
"\u200d" # zero-width joiner
"\ufe0f" # emoji variation selector
"]+",
flags=re.UNICODE,
)
EMOTIONS = [
'happy',
'sad',
'surprised',
'embarrassed',
'flirty',
'angry',
'thinking',
'confused'
"happy",
"sad",
"surprised",
"embarrassed",
"flirty",
"angry",
"thinking",
"confused",
]
SYSTEM_PROMPT = """
@@ -54,6 +74,12 @@ EMOTION:happy Hey dummy! Sorry to barge in! Ya feel like hanging out?\n
"""
def sanitize_speech(text):
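# Strip EMOTION:<tag> tokens and any emoji from the model's reply.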
text_without_emotion_tokens = EMOTION_TOKEN_REGEX.sub("", text)
return EMOJI_REGEX.sub("", text_without_emotion_tokens)
def parse_emotion(line):
def _normalize_emotion(em):
# If not a valid emotion, then search for a match in the
@@ -67,14 +93,14 @@ def parse_emotion(line):
return em
try:
e = re.compile(r'EMOTION:\w+')
m = e.match(line)
m = EMOTION_REGEX.match(line)
if m is not None:
emotion = m.group().split(':')[1]
emotion = m.group().split(":")[1]
text = line[m.span()[1]:]
sanitized = sanitize_speech(text)
return _normalize_emotion(emotion), text
return _normalize_emotion(emotion), sanitized
return None, line
@@ -82,34 +108,88 @@ def parse_emotion(line):
return None, str(e)
def sanitize_speech(text):
# This removes all non-ASCII characters (useful for emojis)
return text.encode('ascii', 'ignore').decode('ascii')
def set_model_capabilities() -> tuple:
"""
LM Studio throws Bad Request if the reasoning flag is set for a model
that doesn't support it. This method probes the currently configured
model and records whether the flag is accepted in
persistent.disable_reasoning, so fetch_llm knows whether it can turn
reasoning off. Returns a (success, error) tuple.
"""
try:
headers = {"Authorization": f"Bearer {persistent.api_key}"}
data = {
"model": persistent.model,
"input": "Start the conversation.",
"reasoning": "off",
"system_prompt": SYSTEM_PROMPT,
}
renpy.fetch(
f"{persistent.base_url}/api/v1/chat",
headers=headers,
json=data,
result="json",
)
except renpy.FetchError as fe:
# A 400 (Bad Request) is assumed to mean LM Studio rejected the request
# because the model doesn't support the reasoning setting in chat, so
# the flag must not be sent for this model.
if hasattr(fe, "status_code") and fe.status_code == 400:
persistent.disable_reasoning = False
return True, None
else:
return False, str(fe)
except Exception as e:
# Something else happened.
return False, str(e)
else:
# The fetch worked, so the reasoning setting is available.
persistent.disable_reasoning = True
return True, None
def fetch_llm(message: str) -> list:
"""
Queries the chat endpoint of the configured LM Studio server and
returns the reply split into lines.
"""
global last_response_id
try:
# Set basic request data.
# Set request data.
headers = {"Authorization": f"Bearer {persistent.api_key}"}
data = {"model": persistent.model,
"input": message,
"system_prompt": SYSTEM_PROMPT}
data = {
"model": persistent.model,
"input": message,
"system_prompt": SYSTEM_PROMPT,
}
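# Only send the reasoning flag when the capability probe confirmed the model accepts it.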
if persistent.disable_reasoning:
data["reasoning"] = "off"
# Add the previous response ID if any to continue the conversation.
if last_response_id is not None:
data["previous_response_id"] = last_response_id
response = renpy.fetch(f"{persistent.base_url}/api/v1/chat",
headers=headers,
json=data,
result="json")
# Fetch from LM Studio and parse the response.
response = renpy.fetch(
f"{persistent.base_url}/api/v1/chat",
headers=headers,
json=data,
result="json",
)
last_response_id = response["response_id"]
text = response["output"][0]["content"]
return text.split('\n')
return text.split("\n")
except Exception as e:
return [f'Failed to fetch with error: {e}']
return [f"Failed to fetch with error: {e}"]

View File

@@ -23,7 +23,7 @@ define gui.show_name = True
## The version of the game.
define config.version = "0.2"
define config.version = "0.3"
## Text that is placed on the game's about screen. Place the text between the
@@ -84,17 +84,18 @@ define config.intra_transition = dissolve
## A transition that is used after a game has been loaded.
define config.after_load_transition = None
define config.after_load_transition = dissolve
## Used when entering the main menu after the game has ended.
define config.end_game_transition = None
define config.end_game_transition = dissolve
## A variable to set the transition used when the game starts does not exist.
## Instead, use a with statement after showing the initial scene.
define config.end_splash_transition = dissolve
## Window management ###########################################################
##
@@ -217,3 +218,4 @@ define config.minimum_presplash_time = 2.0
default persistent.base_url = 'http://localhost:1234'
default persistent.api_key = ''
default persistent.model = 'gemma-3-4b-it'
default persistent.disable_reasoning = False
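A note on the new defaults: Ren'Py's default statement only fills a persistent field that has not been set before, so an existing install keeps whatever connection settings it already has. To point at a different LM Studio instance, the fields can be reassigned from a script or the developer console; the host and model name below are made-up placeholders.

    $ persistent.base_url = "http://192.168.1.50:1234"  # hypothetical LAN host
    $ persistent.model = "some-other-model"             # hypothetical model identifier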

View File

@@ -1,39 +1,56 @@
define a = Character("Anita", color = "#aaaa00", callback = speaker("a"), image = "anita")
label start:
play music ["zeropage_ambiphonic303chilloutmix.mp3",
"zeropage_ambientdance.mp3",
"zeropage_ambiose.mp3" ] fadeout 0.5 fadein 0.5
scene bg room
show anita happy with dissolve
python:
response = fetch_llm('Start the conversation.')[0]
sanitized = sanitize_speech(response)
emotion, line = parse_emotion(sanitized)
a "[line]"
while True:
python:
message = renpy.input(prompt = "What do you say to her?")
response = fetch_llm(message)
i = 0
while i < len(response):
python:
r = response[i].strip()
s = sanitize_speech(r)
if s != '':
$ emotion, line = parse_emotion(s)
if emotion is not None:
show expression f'anita {emotion}'
a "[line]"
$ i += 1
return
define a = Character("Anita", color = "#aaaa00", callback = speaker("a"), image = "anita")
label start:
stop music fadeout 1.0
scene bg room
with Dissolve(2.0)
$ success, error = set_model_capabilities()
if not success:
call failure(error) from _call_failure
return
play music ["zeropage_ambiphonic303chilloutmix.mp3",
"zeropage_ambientdance.mp3",
"zeropage_ambiose.mp3" ] fadeout 0.5 fadein 0.5
show anita happy with dissolve
python:
response = fetch_llm('Start the conversation.')[0]
emotion, line = parse_emotion(response)
a "[line]"
while True:
python:
message = renpy.input(prompt = "What do you say to her?")
response = fetch_llm(message)
i = 0
while i < len(response):
python:
r = response[i].strip()
if r != '':
$ emotion, line = parse_emotion(r)
if emotion is not None:
show expression f'anita {emotion}'
a "[line]"
$ i += 1
return
label failure(error):
"""Alas! Figuring out the capabilities of the configured model failed with the following error.
[error]
Unfortunately the program cannot continue, returning to the main menu."""
return
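A side note on the conversation loop above: show expression f'anita {emotion}' resolves a sprite name at runtime, so it assumes one image exists for every entry in EMOTIONS, either declared explicitly or picked up by Ren'Py's automatic image definitions from the images directory. A minimal sketch of the explicit form, with made-up file names:

    image anita happy = "anita_happy.png"
    image anita sad = "anita_sad.png"
    image anita surprised = "anita_surprised.png"
    image anita embarrassed = "anita_embarrassed.png"
    image anita flirty = "anita_flirty.png"
    image anita angry = "anita_angry.png"
    image anita thinking = "anita_thinking.png"
    image anita confused = "anita_confused.png"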

View File

@@ -1,9 +1,9 @@
{
"build_update": false,
"packages": [
"win",
"linux",
"mac"
"mac",
"win"
],
"add_from": true,
"force_recompile": true,