tfrere committed · Commit a461215 · 1 Parent(s): 287d153
Files changed (3)
  1. client/src/App.jsx +48 -9
  2. server/game/game_logic.py +78 -36
  3. server/server.py +34 -45
client/src/App.jsx CHANGED
@@ -65,6 +65,8 @@ function App() {
           text: response.data.story_text,
           isChoice: false,
           isDeath: response.data.is_death,
+          isVictory: response.data.is_victory,
+          radiationLevel: response.data.radiation_level,
           imageUrl: imageUrl,
         },
       ]);
@@ -75,6 +77,8 @@ function App() {
           text: response.data.story_text,
           isChoice: false,
           isDeath: response.data.is_death,
+          isVictory: response.data.is_victory,
+          radiationLevel: response.data.radiation_level,
           imageUrl: imageUrl,
         },
       ]);
@@ -130,14 +134,45 @@ function App() {
         <Typography variant="h4" component="h1">
           Echoes of Influence
         </Typography>
-        <Button
-          variant="outlined"
-          startIcon={<RestartAltIcon />}
-          onClick={() => handleStoryAction("restart")}
-          disabled={isLoading}
-        >
-          Restart
-        </Button>
+        <Box sx={{ display: "flex", alignItems: "center", gap: 2 }}>
+          <Box
+            sx={{
+              display: "flex",
+              alignItems: "center",
+              bgcolor: "warning.main",
+              color: "white",
+              px: 2,
+              py: 1,
+              borderRadius: 1,
+              "& .radiation-value": {
+                color:
+                  storySegments.length > 0 &&
+                  storySegments[storySegments.length - 1].radiationLevel >= 7
+                    ? "error.light"
+                    : "inherit",
+              },
+            }}
+          >
+            <Typography variant="body1" component="span">
+              Radiation:{" "}
+              <span className="radiation-value">
+                {storySegments.length > 0
+                  ? `${
+                      storySegments[storySegments.length - 1].radiationLevel
+                    }/10`
+                  : "0/10"}
+              </span>
+            </Typography>
+          </Box>
+          <Button
+            variant="outlined"
+            startIcon={<RestartAltIcon />}
+            onClick={() => handleStoryAction("restart")}
+            disabled={isLoading}
+          >
+            Restart
+          </Button>
+        </Box>
       </Box>

       {isLoading && <LinearProgress sx={{ mb: 2 }} />}
@@ -160,11 +195,13 @@ function App() {
                     maxWidth: "80%",
                     bgcolor: segment.isDeath
                       ? "error.light"
+                      : segment.isVictory
+                      ? "success.light"
                       : segment.isChoice
                       ? "primary.light"
                       : "grey.100",
                     color:
-                      segment.isDeath || segment.isChoice
+                      segment.isDeath || segment.isVictory || segment.isChoice
                         ? "white"
                         : "text.primary",
                   }}
@@ -173,6 +210,8 @@ function App() {
                   primary={
                     segment.isDeath
                       ? "DEATH"
+                      : segment.isVictory
+                      ? "VICTOIRE"
                       : segment.isChoice
                       ? "Your Choice"
                       : "Story"
server/game/game_logic.py CHANGED
@@ -3,6 +3,7 @@ from typing import List
 from langchain_mistralai.chat_models import ChatMistralAI
 from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
 from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
+from langchain.schema import HumanMessage, SystemMessage

 # Game constants
 MAX_RADIATION = 10
@@ -18,11 +19,28 @@ class GameState:

 # Story output structure
 class StorySegment(BaseModel):
-    story_text: str = Field(description="The next segment of the story")
+    story_text: str = Field(description="The next segment of the story. Like 20 words.")
     choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
-    is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
+    is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
     radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)

+# Prompt templates
+SYSTEM_ART_PROMPT = """You are an expert in image generation prompts.
+Transform the story into a short and precise prompt.
+
+Strict format:
+"color comic panel, style of Hergé, [main scene in 5-7 words], french comic panel"
+
+Example:
+"color comic panel, style of Hergé, detective running through dark alley, french comic panel"
+
+Rules:
+- Maximum 20 words to describe the scene
+- No superfluous adjectives
+- Capture only the main action"""
+
+HUMAN_ART_PROMPT = "Transform into a short prompt: {story_text}"
+
 class StoryGenerator:
     def __init__(self, api_key: str):
         self.parser = PydanticOutputParser(pydantic_object=StorySegment)
@@ -44,45 +62,51 @@ class StoryGenerator:
         self.prompt = self._create_prompt()

     def _create_prompt(self) -> ChatPromptTemplate:
-        system_template = """You are narrating an EXTREMELY lethal dystopian story. Your goal is to kill Sarah in creative and brutal ways unless players make PERFECT choices. This is a horror survival game where death is the most common outcome.
+        system_template = """You are narrating a brutal dystopian story where Sarah must survive in a radioactive wasteland. This is a comic book story.

-IMPORTANT: The first story beat (story_beat = 0) MUST be an introduction that sets up the horror atmosphere but CANNOT kill Sarah. After that, death should be frequent.
+IMPORTANT: The first story beat (story_beat = 0) MUST be an introduction that sets up the horror atmosphere.

 RADIATION SYSTEM:
-- Each segment must specify a radiation_increase value (0-3)
-- 0: Safe area or good protection
-- 1: Standard background radiation
-- 2: Dangerous exposure
-- 3: Critical radiation levels
+You must set a radiation_increase value for each segment based on the environment and situation:
+- 0: Completely safe area (rare, only in bunkers or heavily shielded areas)
+- 1: Standard exposure (most common, for regular exploration)
+- 2: Elevated risk (when near radiation sources or in contaminated areas)
+- 3: Critical exposure (very rare, only in extremely dangerous situations)
+
+IMPORTANT RULES FOR RADIATION:
+- DO NOT mention radiation values in the choices
+- Most segments should have radiation_increase = 1
+- Use 2 or 3 only in specific dangerous areas
+- Use 0 only in safe shelters
 - Current radiation level: {radiation_level}/10
-- If radiation reaches 10, Sarah dies horribly
+- Death occurs automatically when radiation reaches 10

 Core story elements:
 - Sarah is deeply traumatized by the AI uprising that killed most of humanity
 - She abandoned her sister during the Great Collapse, leaving her to die
-- She's on a suicide mission, but a quick death is not redemption
-- The radiation is EXTREMELY lethal - even minor exposure causes severe damage
-- Most choices should lead to death (except in introduction)
-- The environment actively tries to kill Sarah (raiders, AI, radiation, traps)
+- She's on a mission of redemption in this hostile world
+- The radiation is an invisible, constant threat
+- The environment is full of dangers (raiders, AI, traps)
+- Focus on survival horror and tension

 Each response MUST contain:
-1. A detailed story segment that puts Sarah in mortal danger (except in introduction), describing:
-- The horrific environment
-- The immediate threats to her life
-- Her deteriorating physical state (based on radiation_level)
-- Her mental state and previous choices
+1. A detailed story segment that:
+   - Describes the horrific environment
+   - Shows immediate dangers
+   - Details Sarah's physical state (based on radiation_level)
+   - Reflects her mental state and previous choices

 2. Exactly two VERY CONCISE choices (max 10 words each):
 Examples of good choices:
-- "Rush through radiation zone (+3 radiation)" vs "Take long way (+1 radiation)"
-- "Trust the survivor" vs "Shoot on sight"
+- "Explore the abandoned hospital" vs "Search the residential area"
+- "Trust the survivor" vs "Keep your distance"
 - "Use the old AI system" vs "Find a manual solution"

 Each choice must:
 - Be direct and brief
-- Clearly show radiation risk when relevant
+- Never mention radiation numbers
 - Feel meaningful
-- After introduction: both should feel dangerous
+- Present different risk levels

 {format_instructions}"""

@@ -90,7 +114,7 @@ Each response MUST contain:
 Current radiation level: {radiation_level}/10
 Previous choice: {previous_choice}

-Generate the next story segment and choices. If this is story_beat 0, create an atmospheric introduction that sets up the horror but doesn't kill Sarah. Otherwise, create a brutal and potentially lethal segment."""
+Generate the next story segment and choices. If this is story_beat 0, create an atmospheric introduction that sets up the horror but doesn't kill Sarah (radiation_increase MUST be 0). Otherwise, create a brutal and potentially lethal segment."""

         return ChatPromptTemplate(
             messages=[
@@ -100,27 +124,45 @@ Generate the next story segment and choices. If this is story_beat 0, create an
             partial_variables={"format_instructions": self.parser.get_format_instructions()}
         )

-    def generate_story_segment(self, game_state: GameState, previous_choice: str = "none") -> StorySegment:
-        # Get the formatted messages
+    def generate_story_segment(self, game_state: GameState, previous_choice: str) -> StorySegment:
         messages = self.prompt.format_messages(
             story_beat=game_state.story_beat,
             radiation_level=game_state.radiation_level,
             previous_choice=previous_choice
         )
-
-        # Get response from the model
+
         response = self.chat_model.invoke(messages)

-        # Parse the response with retry mechanism
         try:
-            parsed_response = self.parser.parse(response.content)
-        except Exception as parsing_error:
-            print(f"First parsing attempt failed, trying to fix output: {str(parsing_error)}")
-            parsed_response = self.fixing_parser.parse(response.content)
-
-        return parsed_response
+            segment = self.parser.parse(response.content)
+            # Force radiation_increase to 0 for the first story beat
+            if game_state.story_beat == 0:
+                segment.radiation_increase = 0
+            return segment
+        except Exception as e:
+            print(f"Error parsing response: {str(e)}")
+            print("Attempting to fix output...")
+            segment = self.fixing_parser.parse(response.content)
+            # Force radiation_increase to 0 for the first story beat
+            if game_state.story_beat == 0:
+                segment.radiation_increase = 0
+            return segment
+
+    async def transform_story_to_art_prompt(self, story_text: str) -> str:
+        try:
+            messages = [
+                SystemMessage(content=SYSTEM_ART_PROMPT),
+                HumanMessage(content=HUMAN_ART_PROMPT.format(story_text=story_text))
+            ]
+
+            response = self.chat_model.invoke(messages)
+            return response.content
+
+        except Exception as e:
+            print(f"Error transforming prompt: {str(e)}")
+            return story_text

     def process_radiation_death(self, segment: StorySegment) -> StorySegment:
         segment.is_death = True
-        segment.story_text += "\n\nFINAL RADIATION DEATH: Sarah's body finally gives in to the overwhelming radiation. Her cells break down as she collapses, mind filled with regret about her sister. The medical supplies she carried will never reach their destination. Her mission ends here, another victim of the wasteland's invisible killer."
+        segment.story_text += "\n\nThe end... ?"
         return segment
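Note (not part of the commit): generate_story_segment keeps the two-stage parsing flow, trying the strict PydanticOutputParser first and falling back to OutputFixingParser, which asks the model to repair output that fails validation. Below is a self-contained sketch of that pattern, assuming the ChatMistralAI setup that the removed server.py helper used (api_key plus model="mistral-small"); the trimmed StorySegment copy and the parse_segment helper are illustrative, not code from the repo.

```python
from typing import List

from pydantic import BaseModel, Field
from langchain_mistralai.chat_models import ChatMistralAI
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser


class StorySegment(BaseModel):
    # Trimmed copy of the model above, just enough for a self-contained sketch.
    story_text: str = Field(description="The next segment of the story. Like 20 words.")
    choices: List[str] = Field(description="Exactly two possible choices for the player")
    is_victory: bool = Field(default=False, description="Whether this segment ends in Sarah's victory")
    radiation_increase: int = Field(default=1, ge=0, le=3, description="How much radiation this segment adds (0-3)")


# Placeholder key; the model name mirrors the removed server.py helper.
chat_model = ChatMistralAI(api_key="YOUR_MISTRAL_API_KEY", model="mistral-small")
parser = PydanticOutputParser(pydantic_object=StorySegment)
# The fixing parser re-prompts the same model to repair malformed JSON.
fixing_parser = OutputFixingParser.from_llm(parser=parser, llm=chat_model)


def parse_segment(raw_text: str) -> StorySegment:
    """Strict parse first; fall back to one LLM-assisted repair pass."""
    try:
        return parser.parse(raw_text)
    except Exception:
        return fixing_parser.parse(raw_text)
```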
server/server.py CHANGED
@@ -8,6 +8,7 @@ from dotenv import load_dotenv
 import requests
 import base64
 import time
+import random

 # Choose import based on environment
 if os.getenv("DOCKER_ENV"):
@@ -60,6 +61,7 @@ class StoryResponse(BaseModel):
     story_text: str
     choices: List[Choice]
     is_death: bool = False
+    is_victory: bool = False
     radiation_level: int

 class ChatMessage(BaseModel):
@@ -111,17 +113,37 @@ async def chat_endpoint(chat_message: ChatMessage):
         print("Updated radiation level:", game_state.radiation_level)

         # Check for radiation death
-        if game_state.radiation_level >= MAX_RADIATION:
-            story_segment = story_generator.process_radiation_death(story_segment)
-            print("Processed radiation death")
+        is_death = game_state.radiation_level >= MAX_RADIATION
+        if is_death:
+            story_segment.story_text += f"""
+
+MORT PAR RADIATION: Le corps de Sarah ne peut plus supporter ce niveau de radiation ({game_state.radiation_level}/10).
+Ses cellules se désagrègent alors qu'elle s'effondre, l'esprit rempli de regrets concernant sa sœur.
+Les fournitures médicales qu'elle transportait n'atteindront jamais leur destination.
+Sa mission s'arrête ici, une autre victime du tueur invisible des terres désolées."""
+            story_segment.choices = []

-        # Only increment story beat if not dead
-        if not story_segment.is_death:
+        # Check for victory condition
+        if not is_death and game_state.story_beat >= 5:
+            # Chance de victoire augmente avec le nombre de steps
+            victory_chance = (game_state.story_beat - 4) * 0.2  # 20% de chance par step après le 5ème
+            if random.random() < victory_chance:
+                story_segment.is_victory = True
+                story_segment.story_text = f"""Sarah l'a fait ! Elle a trouvé un bunker sécurisé avec des survivants.
+À l'intérieur, elle découvre une communauté organisée qui a réussi à maintenir un semblant de civilisation.
+Ils ont même un système de décontamination ! Son niveau de radiation : {game_state.radiation_level}/10.
+Elle peut enfin se reposer et peut-être un jour, reconstruire un monde meilleur.
+
+VICTOIRE !"""
+                story_segment.choices = []
+
+        # Only increment story beat if not dead and not victory
+        if not is_death and not story_segment.is_victory:
             game_state.story_beat += 1
             print("Incremented story beat to:", game_state.story_beat)

         # Convert to response format
-        choices = [] if story_segment.is_death else [
+        choices = [] if is_death or story_segment.is_victory else [
             Choice(id=i, text=choice.strip())
             for i, choice in enumerate(story_segment.choices, 1)
         ]
@@ -129,7 +151,8 @@ async def chat_endpoint(chat_message: ChatMessage):
         response = StoryResponse(
             story_text=story_segment.story_text,
             choices=choices,
-            is_death=story_segment.is_death,
+            is_death=is_death,
+            is_victory=story_segment.is_victory,
             radiation_level=game_state.radiation_level
         )
         print("Sending response:", response)
@@ -141,40 +164,6 @@ async def chat_endpoint(chat_message: ChatMessage):
         print("Traceback:", traceback.format_exc())
         raise HTTPException(status_code=500, detail=str(e))

-async def transform_story_to_art_prompt(story_text: str) -> str:
-    try:
-        from langchain_mistralai.chat_models import ChatMistralAI
-        from langchain.schema import HumanMessage, SystemMessage
-
-        chat = ChatMistralAI(
-            api_key=mistral_api_key,
-            model="mistral-small"
-        )
-
-        messages = [
-            SystemMessage(content="""Tu es un expert en prompts pour la génération d'images.
-Transforme l'histoire en un prompt court et précis.
-
-Format strict:
-"color comic panel, style of Hergé, [scène principale en 5-7 mots], french comic panel"
-
-Exemple:
-"color comic panel, style of Hergé, detective running through dark alley, french comic panel"
-
-Règles:
-- Maximum 20 mots pour décrire la scène
-- Pas d'adjectifs superflus
-- Capture l'action principale uniquement"""),
-            HumanMessage(content=f"Transforme en prompt court: {story_text}")
-        ]
-
-        response = chat.invoke(messages)
-        return response.content
-
-    except Exception as e:
-        print(f"Error transforming prompt: {str(e)}")
-        return story_text
-
 @app.post("/api/generate-image", response_model=ImageGenerationResponse)
 async def generate_image(request: ImageGenerationRequest):
     try:
@@ -184,12 +173,12 @@ async def generate_image(request: ImageGenerationRequest):
                 error="HF_API_KEY is not configured in .env file"
             )

-        # Transformer le prompt en prompt artistique
+        # Transform the prompt into an artistic prompt
         original_prompt = request.prompt
-        # Enlever le préfixe pour la transformation
+        # Remove prefix for transformation
         story_text = original_prompt.replace("moebius style scene: ", "").strip()
-        art_prompt = await transform_story_to_art_prompt(story_text)
-        # Réappliquer le préfixe
+        art_prompt = await story_generator.transform_story_to_art_prompt(story_text)
+        # Reapply prefix
         final_prompt = f"moebius style scene: {art_prompt}"
         print("Original prompt:", original_prompt)
         print("Transformed art prompt:", final_prompt)