@@ -528,9 +528,9 @@ def load_pending_evaluations(self, ctx, skip_update=False):
                     }
                 )
             if update_store:
-                pending_evaluations_in_store[dataset_id] = (
-                    updated_pending_evaluations_for_dataset_in_stored
-                )
+                pending_evaluations_in_store[
+                    dataset_id
+                ] = updated_pending_evaluations_for_dataset_in_stored
             store.set("pending_evaluations", pending_evaluations_in_store)
         ctx.panel.set_data("pending_evaluations", pending_evaluations)
 
@@ -782,52 +782,6 @@ def load_view(self, ctx):
         if view is not None:
             ctx.ops.set_view(view)
 
-    def save_scenario(self, ctx):
-        # callback
-        pass
-
-    def extract_save_scenario_params(self, ctx):
-        print("extract_save_scenario_params", ctx.params)
-        params = ctx.params.get("scenario", {}).get("subset", {})
-        scenario_name = params.get("scenario_name", None)
-
-        if scenario_name is None:
-            raise ValueError("No scenario name provided")
-
-        scenario_type = params.get("scenario_type", None)
-        if scenario_type is None:
-            raise ValueError("No scenario type provided")
-
-        # TODO: if name exists, show alert and halt
-
-        # custom code handling
-        custom_code_expression = None
-        if scenario_type == "custom_code":
-            custom_code_expression = (
-                params.get("custom_code_stack", {})
-                .get("body_stack", {})
-                .get("custom_code", None)
-            )
-
-            if custom_code_expression is None:
-                raise ValueError("No custom code expression provided")
-
-        # TODO: others handling
-        sample_field_values = params.get("sample_field_values", None)
-        label_attribute_values = params.get("label_attribute_values", None)
-        saved_views = params.get("saved_views_values", None)
-
-        return {
-            "name": scenario_name,
-            "scenario_type": scenario_type,
-            "saved_views": saved_views,
-            "sample_field": params.get("sample_field", None),
-            "scenario_field": params.get("scenario_field", None),
-            "custom_code_expression": custom_code_expression,
-            "sample_field_values": sample_field_values,
-            "label_attribute_values": label_attribute_values,
-        }
-
     def load_compare_evaluation_results(self, ctx):
         base_model_key = (
             ctx.params.get("panel_state", {}).get("view", {}).get("key", None)
@@ -851,140 +805,6 @@ def load_compare_evaluation_results(self, ctx):
             eval_b_results,
         )
 
-    def on_save_scenario(self, ctx):
-        label_attrs_classes = None
-
-        params = self.extract_save_scenario_params(ctx)
-        (
-            eval_a_key,
-            eval_a_results,
-            eval_b_key,
-            eval_b_results,
-        ) = self.load_compare_evaluation_results(ctx)
-
-        graph_data = {
-            eval_a_key: {
-                "performance": {},
-                "confusion_matrix": {},
-            },
-            eval_b_key: {
-                "performance": {},
-                "confusion_matrix": {},
-            },
-        }
-
-        scenario_type = params.get("scenario_type", None)
-        sample_field_values = params.get("sample_field_values", None)
-
-        # I. Custom code
-        if scenario_type == "custom_code":
-            custom_code_expression = params.get("custom_code_expression", None)
-            if custom_code_expression is None:
-                raise ValueError("No code expression provided")
-
-            # TODO:
-            # subset_def = dict(type="code", code=custom_code_expression)
-
-            # print("subset_def", subset_def)
-            # with eval_a_results.use_subset(subset_def):
-            #     graph_data[eval_a_key]["performance"][
-            #         "custom"
-            #     ] = eval_a_results.metrics()
-
-            # with eval_b_results.use_subset(subset_def):
-            #     graph_data[eval_b_key]["performance"][
-            #         "custom"
-            #     ] = eval_b_results.metrics()
-
-        # II. Saved views
-        elif scenario_type == "saved_views":
-            saved_views = params.get("saved_views", None)
-
-            for saved_view in saved_views:
-                subset_def = dict(
-                    type="view",
-                    view=saved_view,
-                )
-
-                # Graph I data - Model Performance
-                with eval_a_results.use_subset(subset_def):
-                    graph_data[eval_a_key]["performance"][
-                        saved_view
-                    ] = eval_a_results.metrics()
-                with eval_b_results.use_subset(subset_def):
-                    graph_data[eval_b_key]["performance"][
-                        saved_view
-                    ] = eval_b_results.metrics()
-
-        # III. Sample Fields
-        elif scenario_type == "sample_field":
-            scenario_field = params.get("scenario_field", None)
-            if scenario_field is None:
-                raise ValueError("No scenario field provided")
-
-            # case sample fields: tags
-            if scenario_field == "tags":
-                field_paths = list(sample_field_values.keys())
-
-                for field_path in field_paths:
-                    if sample_field_values[field_path] is True:
-                        subset_def = dict(
-                            type="field",
-                            field=scenario_field,
-                            value=field_path,
-                        )
-
-                        # Graph I data - Model Performance
-                        with eval_a_results.use_subset(subset_def):
-                            graph_data[eval_a_key]["performance"][
-                                field_path
-                            ] = eval_a_results.metrics()
-                        with eval_b_results.use_subset(subset_def):
-                            graph_data[eval_b_key]["performance"][
-                                field_path
-                            ] = eval_b_results.metrics()
-
-            # TODO: case continuous fields: confidence
-
-            # TODO: case discrete fields: < 100 categories
-
-        # IV. Label Attributes
-        elif scenario_type == "label_attribute":
-            label_attrs_map = params.get("label_attribute_values", {})
-            label_attrs_classes = list(label_attrs_map.keys()) or []
-
-            # Graph I data - Model Performance
-            for label in label_attrs_classes:
-                subset_def = dict(
-                    type="attribute",
-                    field="label",  # TODO
-                    value=label,
-                )
-                with eval_a_results.use_subset(subset_def):
-                    graph_data[eval_a_key]["performance"][
-                        label
-                    ] = eval_a_results.metrics()
-                with eval_b_results.use_subset(subset_def):
-                    graph_data[eval_b_key]["performance"][
-                        label
-                    ] = eval_b_results.metrics()
-
-            # Graph II data - Prediction Statistics
-            # TODO
-
-            # Graph III data - Confusion Metrics
-            graph_data[eval_a_key]["confusion_matrix"] = (
-                eval_a_results.confusion_matrix(classes=label_attrs_classes)
-            )
-            graph_data[eval_b_key]["confusion_matrix"] = (
-                eval_b_results.confusion_matrix(classes=label_attrs_classes)
-            )
-
-        print("graph_data", graph_data)
-        # TODO: save the subset
-
-        # ctx.panel.set_state("evaluations_scenario", graph_data)
-
     def get_subset_def_data(self, ctx, info, results, subset_def):
         with results.use_subset(subset_def):
             metrics = results.metrics()
@@ -1028,7 +848,6 @@ def load_scenario(self, ctx):
         if scenario_id:
            scenario = self.get_scenario(ctx, eval_id, scenario_id)
            scenario_data = self.get_scenario_data(ctx, scenario)
-           print("scenario_data", scenario_data)
            ctx.panel.set_data(f"scenario_{scenario_id}", scenario_data)
 
    def render(self, ctx):
@@ -1047,7 +866,6 @@ def render(self, ctx):
                 load_view=self.load_view,
                rename_evaluation=self.rename_evaluation,
                delete_evaluation=self.delete_evaluation,
-               on_save_scenario=self.on_save_scenario,
                load_scenario=self.load_scenario,
            ),
        )