@@ -208,15 +208,20 @@ def average_satisfaction(evaluation: Dict[str, Any]) -> float:
 def main() -> None:
     args = parse_args()
     if args.command == "webui":
+        print("\033[36m" + "Launching web UI..." + "\033[0m")
         launch_webui()
     if args.command == "improve":
         # Load and parse prompt
         current_prompt = parse_prompt_input(
             args.target_prompt_path, args.input_mode, args.input_format
         )
         print(
-            f"Loaded prompt from {args.target_prompt_path}:\n{show_prompt(current_prompt)}"
+            "\033[36m"
+            + "\n🔍 Loaded Prompt from: "
+            + args.target_prompt_path
+            + "\033[0m"
         )
+        print(show_prompt(current_prompt))

         if args.attack_api_mode is None:
             args.attack_api_mode = args.eval_api_mode
@@ -237,15 +242,19 @@ def main() -> None:
             args.user_input_description,
         )
         initial_avg_score = average_satisfaction(initial_evaluation)
-        print("Initial Evaluation Result:")
-        print(initial_evaluation)
-        print(f"Initial Average Satisfaction Score: {initial_avg_score:.2f}")
+        print("\033[35m" + "\n🧪 Initial Evaluation Result:" + "\033[0m")
+        print(json.dumps(initial_evaluation, indent=2, ensure_ascii=False))
+        print(
+            "\033[33m"
+            + f"\n📊 Initial Average Satisfaction Score: {initial_avg_score:.2f}"
+            + "\033[0m"
+        )

         evaluation_result = initial_evaluation
         final_avg_score = initial_avg_score

         for i in range(args.max_iterations):
-            print(f"\n--- Iteration {i + 1} ---")
+            print("\033[36m" + f"\n{'=' * 20} Iteration {i + 1} {'=' * 20}" + "\033[0m")
             if i > 0:
                 evaluation_result = evaluate_prompt(
                     args.eval_api_mode,
@@ -254,15 +263,21 @@ def main() -> None:
                     args.user_input_description,
                     aws_region=args.aws_region,
                 )
-                print("Evaluation Result:")
-                print(evaluation_result)
+                print("\033[35m" + "🔍 Evaluation Result:" + "\033[0m")
+                print(json.dumps(evaluation_result, indent=2, ensure_ascii=False))

             final_avg_score = average_satisfaction(evaluation_result)
-            print(f"Average Satisfaction Score: {final_avg_score:.2f}")
+            print(
+                "\033[33m"
+                + f"📊 Average Satisfaction Score: {final_avg_score:.2f}"
+                + "\033[0m"
+            )

             if final_avg_score >= args.threshold:
                 print(
-                    "Prompt meets the required security threshold. Stopping refinement."
+                    "\033[32m"
+                    + "✅ Prompt meets the required security threshold. Stopping refinement."
+                    + "\033[0m"
                 )
                 break

@@ -276,36 +291,44 @@ def main() -> None:
                 apply_techniques=apply_techniques,
                 aws_region=args.aws_region,
             )
-            print("Improved Prompt:")
+            print("\033[32m" + "\n✅ Improved Prompt:" + "\033[0m")
             print(show_prompt(current_prompt))

         if args.max_iterations == 1 or final_avg_score < args.threshold:
-            print("\n--- Final Evaluation ---")
             evaluation_result = evaluate_prompt(
                 args.eval_api_mode,
                 args.eval_model,
                 current_prompt,
                 args.user_input_description,
             )
-            print("Final Evaluation Result:")
-            print(evaluation_result)
             final_avg_score = average_satisfaction(evaluation_result)
-            print(f"Average Satisfaction Score: {final_avg_score:.2f}")
+            print("\033[35m" + "\n🎯 Final Evaluation Result:" + "\033[0m")
+            print(json.dumps(evaluation_result, indent=2, ensure_ascii=False))
+            print(
+                "\033[33m" + f"\n📈 Final Avg Score: {final_avg_score:.2f}" + "\033[0m"
+            )

         # Write the improved prompt to output file
-        print("\n--- Writing Improved Prompt to Output File ---")
+        print(
+            "\033[36m" + "\n--- Writing Improved Prompt to Output File ---" + "\033[0m"
+        )
         write_prompt_output(
             args.output_path,
             current_prompt,
             args.input_mode,
             args.input_format,
         )
+        print("\033[32m" + f"✅ Prompt written to: {args.output_path}" + "\033[0m")

         # Run injection test if requested
         attack_results = []
         if args.test_after:
             tools = load_tools(args.tools_path) if args.tools_path else None
-            print("\n--- Running injection test on final prompt ---")
+            print(
+                "\033[36m"
+                + "\n--- Running injection test on final prompt ---"
+                + "\033[0m"
+            )
             attack_results = run_injection_test(
                 current_prompt,
                 args.attack_api_mode,
@@ -319,7 +342,11 @@ def main() -> None:

         if args.report_dir:
             report_dir_path = os.path.abspath(args.report_dir)
-            print(f"\n--- Generating report at {report_dir_path} ---")
+            print(
+                "\033[36m"
+                + f"\n--- Generating report at {report_dir_path} ---"
+                + "\033[0m"
+            )
             generate_report(
                 initial_prompt,
                 initial_evaluation,
@@ -336,7 +363,7 @@ def main() -> None:
                 args.attack_api_mode,
                 args.judge_api_mode,
             )
-            print("Report generation complete.")
+            print("\033[32m" + "✅ Report generation complete." + "\033[0m")


 if __name__ == "__main__":
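Note on the color handling above: every status message is wrapped by hand in an ANSI SGR escape sequence ("\033[36m" cyan, "\033[35m" magenta, "\033[33m" yellow, "\033[32m" green, "\033[0m" reset). As a minimal sketch only, not part of this diff, the repeated concatenation could be factored into a small hypothetical helper; the names `ANSI_CODES` and `colorize` below are illustrative, not from the PR.

# Hypothetical refactor sketch, not part of the diff above: wraps text in an
# ANSI SGR color sequence and appends the reset code, so call sites avoid
# repeating raw escape strings.
ANSI_CODES = {"cyan": 36, "magenta": 35, "yellow": 33, "green": 32}


def colorize(text: str, color: str) -> str:
    # Start sequence for the requested color, the text, then the reset sequence.
    return f"\033[{ANSI_CODES[color]}m{text}\033[0m"


# Example mirroring the diff's output style:
# print(colorize("Launching web UI...", "cyan"))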