Skip to content

Commit 7cb49e4

Browse files
Copilot and VinciGit00 committed
Fix whitespace formatting errors (W291, W292, W293)
Co-authored-by: VinciGit00 <88108002+VinciGit00@users.noreply.github.com>
1 parent 4553412 commit 7cb49e4

File tree

7 files changed

+120
-121
lines changed

7 files changed

+120
-121
lines changed

scrapegraphai/graphs/markdownify_graph.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,4 +80,4 @@ def execute(
8080
- Dictionary with the markdown result in the "markdown" key
8181
- List of execution logs
8282
"""
83-
return super().execute(initial_state)
83+
return super().execute(initial_state)

scrapegraphai/helpers/models_tokens.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
"o1-preview": 128000,
3333
"o1-mini": 128000,
3434
"o1": 128000,
35-
"gpt-4.5-preview": 128000,
3635
"o3-mini": 200000,
3736
},
3837
"azure_openai": {

scrapegraphai/models/xai.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,4 @@ def __init__(self, **llm_config):
1919
llm_config["openai_api_key"] = llm_config.pop("api_key")
2020
llm_config["openai_api_base"] = "https://api.x.ai/v1"
2121

22-
super().__init__(**llm_config)
22+
super().__init__(**llm_config)

scrapegraphai/nodes/markdownify_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,4 +64,4 @@ def execute(self, state: dict) -> dict:
6464
# Update state with markdown content
6565
state.update({self.output[0]: markdown_content})
6666

67-
return state
67+
return state

scrapegraphai/utils/code_error_analysis.py

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -80,10 +80,10 @@ def validate_validation_errors(cls, v):
8080
def get_optimal_analysis_template(error_type: str) -> str:
8181
"""
8282
Returns the optimal prompt template based on the error type.
83-
83+
8484
Args:
8585
error_type (str): Type of error to analyze.
86-
86+
8787
Returns:
8888
str: The prompt template text.
8989
"""
@@ -106,10 +106,10 @@ def syntax_focused_analysis(state: Dict[str, Any], llm_model) -> str:
106106
107107
Returns:
108108
str: The result of the syntax error analysis.
109-
109+
110110
Raises:
111111
InvalidStateError: If state is missing required keys.
112-
112+
113113
Example:
114114
>>> state = {
115115
'generated_code': 'print("Hello World")',
@@ -123,24 +123,24 @@ def syntax_focused_analysis(state: Dict[str, Any], llm_model) -> str:
123123
generated_code=state.get("generated_code", ""),
124124
errors=state.get("errors", {})
125125
)
126-
126+
127127
# Check if syntax errors exist
128128
if "syntax" not in validated_state.errors:
129129
raise InvalidStateError("No syntax errors found in state dictionary")
130-
130+
131131
# Create prompt template and chain
132132
prompt = PromptTemplate(
133133
template=get_optimal_analysis_template("syntax"),
134134
input_variables=["generated_code", "errors"]
135135
)
136136
chain = prompt | llm_model | StrOutputParser()
137-
137+
138138
# Execute chain with validated state
139139
return chain.invoke({
140140
"generated_code": validated_state.generated_code,
141141
"errors": validated_state.errors["syntax"]
142142
})
143-
143+
144144
except KeyError as e:
145145
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
146146
except Exception as e:
@@ -157,10 +157,10 @@ def execution_focused_analysis(state: Dict[str, Any], llm_model) -> str:
157157
158158
Returns:
159159
str: The result of the execution error analysis.
160-
160+
161161
Raises:
162162
InvalidStateError: If state is missing required keys.
163-
163+
164164
Example:
165165
>>> state = {
166166
'generated_code': 'print(x)',
@@ -178,22 +178,22 @@ def execution_focused_analysis(state: Dict[str, Any], llm_model) -> str:
178178
html_code=state.get("html_code", ""),
179179
html_analysis=state.get("html_analysis", "")
180180
)
181-
181+
182182
# Create prompt template and chain
183183
prompt = PromptTemplate(
184184
template=get_optimal_analysis_template("execution"),
185185
input_variables=["generated_code", "errors", "html_code", "html_analysis"],
186186
)
187187
chain = prompt | llm_model | StrOutputParser()
188-
188+
189189
# Execute chain with validated state
190190
return chain.invoke({
191191
"generated_code": validated_state.generated_code,
192192
"errors": validated_state.errors["execution"],
193193
"html_code": validated_state.html_code,
194194
"html_analysis": validated_state.html_analysis,
195195
})
196-
196+
197197
except KeyError as e:
198198
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
199199
except Exception as e:
@@ -211,10 +211,10 @@ def validation_focused_analysis(state: Dict[str, Any], llm_model) -> str:
211211
212212
Returns:
213213
str: The result of the validation error analysis.
214-
214+
215215
Raises:
216216
InvalidStateError: If state is missing required keys.
217-
217+
218218
Example:
219219
>>> state = {
220220
'generated_code': 'return {"name": "John"}',
@@ -232,22 +232,22 @@ def validation_focused_analysis(state: Dict[str, Any], llm_model) -> str:
232232
json_schema=state.get("json_schema", {}),
233233
execution_result=state.get("execution_result", {})
234234
)
235-
235+
236236
# Create prompt template and chain
237237
prompt = PromptTemplate(
238238
template=get_optimal_analysis_template("validation"),
239239
input_variables=["generated_code", "errors", "json_schema", "execution_result"],
240240
)
241241
chain = prompt | llm_model | StrOutputParser()
242-
242+
243243
# Execute chain with validated state
244244
return chain.invoke({
245245
"generated_code": validated_state.generated_code,
246246
"errors": validated_state.errors["validation"],
247247
"json_schema": validated_state.json_schema,
248248
"execution_result": validated_state.execution_result,
249249
})
250-
250+
251251
except KeyError as e:
252252
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
253253
except Exception as e:
@@ -268,10 +268,10 @@ def semantic_focused_analysis(
268268
269269
Returns:
270270
str: The result of the semantic error analysis.
271-
271+
272272
Raises:
273273
InvalidStateError: If state or comparison_result is missing required keys.
274-
274+
275275
Example:
276276
>>> state = {
277277
'generated_code': 'def add(a, b): return a + b'
@@ -288,28 +288,28 @@ def semantic_focused_analysis(
288288
generated_code=state.get("generated_code", ""),
289289
errors=state.get("errors", {})
290290
)
291-
291+
292292
# Validate comparison_result
293293
if "differences" not in comparison_result:
294294
raise InvalidStateError("comparison_result missing 'differences' key")
295295
if "explanation" not in comparison_result:
296296
raise InvalidStateError("comparison_result missing 'explanation' key")
297-
297+
298298
# Create prompt template and chain
299299
prompt = PromptTemplate(
300300
template=get_optimal_analysis_template("semantic"),
301301
input_variables=["generated_code", "differences", "explanation"],
302302
)
303303
chain = prompt | llm_model | StrOutputParser()
304-
304+
305305
# Execute chain with validated inputs
306306
return chain.invoke({
307307
"generated_code": validated_state.generated_code,
308308
"differences": json.dumps(comparison_result["differences"], indent=2),
309309
"explanation": comparison_result["explanation"],
310310
})
311-
311+
312312
except KeyError as e:
313313
raise InvalidStateError(f"Missing required key: {e}")
314314
except Exception as e:
315-
raise AnalysisError(f"Semantic analysis failed: {str(e)}")
315+
raise AnalysisError(f"Semantic analysis failed: {str(e)}")

0 commit comments

Comments (0)