Skip to content

Commit 09154a0

Browse files
authored
Merge pull request #9 from mdrideout/nov_2025_updates
chore: release 0.62.1 (AI Chat example update)
2 parents e1a480c + a1151e7 commit 09154a0

24 files changed

Lines changed: 173 additions & 131 deletions

File tree

CHANGELOG.md

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,17 @@
22

33
All notable changes to Junjo will be documented in this file.
44

5+
## 0.62.1 - 2026-02-14
6+
7+
This patch release focuses on updates to the AI Chat example.
8+
9+
### Changed
10+
11+
- Restored direct Gemini node usage across `examples/ai_chat` workflows instead of routing through provider abstractions.
12+
- Standardized Gemini model usage to `gemini-3-flash-preview` for text/schema generation and `gemini-2.5-flash-image` for image generation/editing flows.
13+
- Hardened Gemini schema request handling for empty/blocked responses and max-token edge cases.
14+
- Updated `examples/ai_chat/README.md` and backend `.env.example` so Gemini is the default path with Grok as an optional experimentation path.
15+
516
## 0.62.0 - 2026-02-08
617

718
This release is primarily a cleanup and polish pass across existing features.
@@ -39,4 +50,3 @@ and intentionally diverges from Junjo AI Studio version numbering.
3950

4051
- This is mostly non-feature cleanup/polish, but consumers importing removed internal modules will need to migrate imports.
4152
- Example projects changed substantially; treat `examples/ai_chat` as an updated reference implementation rather than a drop-in patch.
42-

examples/ai_chat/README.md

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,15 @@ This is a more complete example of Junjo, showcasing:
1414

1515
## AI provider
1616

17-
This example is currently wired up to use **xAI (Grok)** for text + image generation (via `GrokTool`).
17+
This example is currently wired up to use **Gemini** for all workflow AI calls via `GeminiTool`.
1818

19-
If you'd rather use **Gemini**, you can swap the workflow nodes to use `GeminiTool` instead (both live in `backend/src/app/ai_services/`).
19+
Current node defaults:
20+
21+
- Text generation / structured output: `gemini-3-flash-preview`
22+
- Image generation / image edit: `gemini-2.5-flash-image`
23+
24+
For experimentation, you can switch specific nodes to `GrokTool` (or back) by editing the tool import and model in
25+
the node files under `backend/src/app/workflows/`.
2026

2127
## Run the example
2228

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
# Create your Junjo AI Studio API Key inside Junjo AI Studio
22
JUNJO_AI_STUDIO_API_KEY="[junjo_ai_studio_api_key_here]"
33

4-
# xAI is the currently setup provider of chat and image generation responses.
5-
XAI_API_KEY="[xai_api_key_here]"
6-
7-
# Junjo nodes can be configured to use GeminiTool instead of GrokTool
4+
# Current default implementation in workflow nodes uses GeminiTool with:
5+
# - text/schema model: gemini-3-flash-preview
6+
# - image model: gemini-2.5-flash-image
87
GEMINI_API_KEY="[gemini_api_key_here]"
98

10-
# Required by xAI SDK to produce opentelemetry
11-
OTEL_EXPORTER_OTLP_PROTOCOL="grpc"
9+
# Optional for experimentation if you switch any nodes to GrokTool
10+
XAI_API_KEY="[xai_api_key_here]"
11+
12+
# Required by xAI SDK OTEL setup in app/otel_config.py (used when GrokTool is enabled)
13+
OTEL_EXPORTER_OTLP_PROTOCOL="grpc"

examples/ai_chat/backend/src/app/ai_services/gemini/gemini_tool.py

Lines changed: 37 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -207,27 +207,46 @@ async def schema_request(self, schema: type[T]) -> T:
207207
logger.info(f"Making schema request with prompt: {self._prompt}")
208208

209209
logger.debug(f"Making schema request with model: {self._model}, prompt: {self._prompt}, schema: {schema}")
210-
response = await self._client.aio.models.generate_content(
211-
model=self._model,
212-
contents=self._prompt,
213-
config=types.GenerateContentConfig(
214-
max_output_tokens=500,
215-
temperature=2,
216-
response_mime_type="application/json",
217-
response_schema=schema,
218-
safety_settings=self._safety_settings_off(),
219-
),
220-
)
221-
logger.info(f"Raw response: {response}")
222210

223-
schema_response = response.parsed
224-
if schema_response is None:
225-
logger.error(f"Parsed schema response is None: {response}")
211+
for attempt, max_output_tokens in enumerate((1024, 2048), start=1):
212+
config_kwargs: dict[str, Any] = {
213+
"max_output_tokens": max_output_tokens,
214+
"temperature": 0,
215+
"response_mime_type": "application/json",
216+
"response_schema": schema,
217+
"safety_settings": self._safety_settings_off(),
218+
}
219+
if hasattr(types, "ThinkingConfig"):
220+
config_kwargs["thinking_config"] = types.ThinkingConfig(thinking_budget=0)
221+
222+
response = await self._client.aio.models.generate_content(
223+
model=self._model,
224+
contents=self._prompt,
225+
config=types.GenerateContentConfig(**config_kwargs),
226+
)
227+
logger.info(f"Raw response (schema attempt {attempt}): {response}")
228+
229+
schema_response = response.parsed
230+
if schema_response is not None:
231+
return schema.model_validate(schema_response)
232+
233+
raw_text = (response.text or "").strip()
234+
if raw_text:
235+
try:
236+
return schema.model_validate_json(raw_text)
237+
except Exception:
238+
logger.warning(f"Failed JSON fallback parsing on schema attempt {attempt}.")
226239

227-
# Validate again using the provided model
228-
validated = schema.model_validate(schema_response)
240+
finish_reason = None
241+
if response.candidates:
242+
finish_reason = response.candidates[0].finish_reason
243+
244+
logger.warning(
245+
f"Parsed schema response is None on attempt {attempt}. "
246+
f"finish_reason={finish_reason}, max_output_tokens={max_output_tokens}"
247+
)
229248

230-
return validated
249+
raise ValueError("Gemini schema_request failed: parsed response was None after retries.")
231250

232251
async def gemini_image_request(self) -> bytes:
233252
"""

examples/ai_chat/backend/src/app/workflows/create_contact/avatar_subflow/nodes/avatar_inspiration/node.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from junjo.node import Node
22
from loguru import logger
33

4-
from app.ai_services.grok import GrokTool
4+
from app.ai_services.gemini.gemini_tool import GeminiTool
55
from app.workflows.create_contact.avatar_subflow.nodes.avatar_inspiration.prompt import avatar_inspiration_prompt
66
from app.workflows.create_contact.avatar_subflow.store import AvatarSubflowStore
77

@@ -46,11 +46,11 @@ async def service(self, store: AvatarSubflowStore) -> None:
4646
)
4747
logger.info(f"Creating response with prompt: {prompt}")
4848

49-
grok_tool = GrokTool(prompt=prompt, model="grok-4-1-fast-non-reasoning")
50-
grok_result = await grok_tool.text_request()
51-
logger.info(f"Grok result: {grok_result}")
49+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-3-flash-preview")
50+
gemini_result = await gemini_tool.text_request()
51+
logger.info(f"Gemini result: {gemini_result}")
5252

5353
# Update the state with the avatar id
54-
await store.set_inspiration_prompt(grok_result)
54+
await store.set_inspiration_prompt(gemini_result)
5555

5656
return

examples/ai_chat/backend/src/app/workflows/create_contact/avatar_subflow/nodes/create_avatar/node.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from loguru import logger
33
from nanoid import generate
44

5-
from app.ai_services.grok import GrokTool
5+
from app.ai_services.gemini.gemini_tool import GeminiTool
66
from app.util.save_image_file import save_image_file
77
from app.workflows.create_contact.avatar_subflow.nodes.create_avatar.prompt import create_avatar_prompt
88
from app.workflows.create_contact.avatar_subflow.store import AvatarSubflowStore
@@ -51,9 +51,9 @@ async def service(self, store: AvatarSubflowStore) -> None:
5151
)
5252
logger.info(f"Creating image with prompt: {prompt}")
5353

54-
grok_tool = GrokTool(prompt=prompt, model="grok-imagine-image")
55-
image_bytes = await grok_tool.image_request()
56-
logger.info(f"Grok result image size: {len(image_bytes) / 1024} kb")
54+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-2.5-flash-image")
55+
image_bytes = await gemini_tool.gemini_image_request()
56+
logger.info(f"Gemini result image size: {len(image_bytes) / 1024} kb")
5757

5858
# Create an id for the avatar
5959
avatar_id = generate()

examples/ai_chat/backend/src/app/workflows/create_contact/nodes/create_bio/node.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from junjo.node import Node
22
from loguru import logger
33

4-
from app.ai_services.grok import GrokTool
4+
from app.ai_services.gemini.gemini_tool import GeminiTool
55
from app.workflows.create_contact.nodes.create_bio.prompt import create_bio_prompt
66
from app.workflows.create_contact.store import CreateContactStore
77

@@ -38,11 +38,11 @@ async def service(self, store: CreateContactStore) -> None:
3838
)
3939
logger.info(f"Creating response with prompt: {prompt}")
4040

41-
grok_tool = GrokTool(prompt=prompt, model="grok-4-1-fast-non-reasoning")
42-
grok_result = await grok_tool.text_request()
43-
logger.info(f"Grok result: {grok_result}")
41+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-3-flash-preview")
42+
gemini_result = await gemini_tool.text_request()
43+
logger.info(f"Gemini result: {gemini_result}")
4444

4545
# Update the state with the bio
46-
await store.set_bio(grok_result)
46+
await store.set_bio(gemini_result)
4747

4848
return
Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from app.ai_services.grok import GrokTool
1+
from app.ai_services.gemini.gemini_tool import GeminiTool
22
from app.workflows.create_contact.nodes.create_bio.test.test_prompt import test_evaluate_bio_prompt
33
from app.workflows.create_contact.nodes.create_bio.test.test_schema import TestCreateBioSchema
44

@@ -9,7 +9,7 @@ async def eval_create_bio_node(bio: str) -> TestCreateBioSchema:
99
# Create the request to Gemini to evaluate the generated bio
1010
prompt = test_evaluate_bio_prompt(bio)
1111

12-
grok_tool = GrokTool(prompt=prompt, model="grok-4-1-fast-non-reasoning")
13-
grok_result = await grok_tool.schema_request(schema=TestCreateBioSchema)
12+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-3-flash-preview")
13+
gemini_result = await gemini_tool.schema_request(schema=TestCreateBioSchema)
1414

15-
return grok_result
15+
return gemini_result

examples/ai_chat/backend/src/app/workflows/create_contact/nodes/create_name/node.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from junjo.node import Node
22
from loguru import logger
33

4-
from app.ai_services.grok import GrokTool
4+
from app.ai_services.gemini.gemini_tool import GeminiTool
55
from app.workflows.create_contact.nodes.create_name.prompt import create_name_prompt
66
from app.workflows.create_contact.nodes.create_name.schema import CreateNameSchema
77
from app.workflows.create_contact.store import CreateContactStore
@@ -39,12 +39,12 @@ async def service(self, store: CreateContactStore) -> None:
3939
)
4040
logger.info(f"Creating response with prompt: {prompt}")
4141

42-
grok_tool = GrokTool(prompt=prompt, model="grok-4-1-fast-non-reasoning")
43-
grok_result = await grok_tool.schema_request(CreateNameSchema)
44-
logger.info(f"Grok result: {grok_result}")
42+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-3-flash-preview")
43+
gemini_result = await gemini_tool.schema_request(CreateNameSchema)
44+
logger.info(f"Gemini result: {gemini_result}")
4545

4646
# Update the state with the first name
47-
await store.set_first_name(grok_result.first_name)
48-
await store.set_last_name(grok_result.last_name)
47+
await store.set_first_name(gemini_result.first_name)
48+
await store.set_last_name(gemini_result.last_name)
4949

5050
return

examples/ai_chat/backend/src/app/workflows/create_contact/nodes/select_location/services/get_nearest_city_state.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from loguru import logger
22

3-
from app.ai_services.grok import GrokTool
3+
from app.ai_services.gemini.gemini_tool import GeminiTool
44
from app.workflows.create_contact.nodes.select_location.schemas import LocCityState
55
from app.workflows.create_contact.nodes.select_location.services.get_nearest_city_state_prompt import (
66
get_nearest_city_state_prompt,
@@ -16,8 +16,8 @@ async def get_nearest_city_state(lat: float, long: float) -> LocCityState:
1616
prompt = get_nearest_city_state_prompt(lat, long)
1717
logger.info(f"Creating response with prompt: {prompt}")
1818

19-
grok_tool = GrokTool(prompt=prompt, model="grok-4-1-fast-non-reasoning")
20-
grok_result = await grok_tool.schema_request(schema=LocCityState)
21-
logger.info(f"Grok result: {grok_result}")
19+
gemini_tool = GeminiTool(prompt=prompt, model="gemini-3-flash-preview")
20+
gemini_result = await gemini_tool.schema_request(schema=LocCityState)
21+
logger.info(f"Gemini result: {gemini_result}")
2222

23-
return grok_result
23+
return gemini_result

0 commit comments

Comments
 (0)