
Commit c3a2f69

manivoxel51 authored and brimoor committed
cleanup
1 parent cee5f3e commit c3a2f69

File tree

4 files changed: +4 -196 lines changed


app/packages/core/src/plugins/SchemaIO/components/ButtonView.tsx

Lines changed: 0 additions & 1 deletion
@@ -10,7 +10,6 @@ import Button from "./Button";
 import TooltipProvider from "./TooltipProvider";

 export default function ButtonView(props: ViewPropsType) {
-  console.log("props", props.path, props);
   const { schema, path, onClick } = props;
   const { view = {} } = schema;
   const {

app/packages/core/src/plugins/SchemaIO/components/NativeModelEvaluationView/Evaluation.tsx

Lines changed: 0 additions & 5 deletions
@@ -58,7 +58,6 @@ import {
   getNumericDifference,
   useTriggerEvent,
 } from "./utils";
-import { usePanelId } from "@fiftyone/spaces";

 const KEY_COLOR = "#ff6d04";
 const COMPARE_KEY_COLOR = "#03a9f4";
@@ -67,7 +66,6 @@ const DEFAULT_BAR_CONFIG = { sortBy: "default" };
 const NONE_CLASS = "(none)";

 export default function Evaluation(props: EvaluationProps) {
-  console.log("Evaluation props", props);
   const {
     name,
     id,
@@ -82,7 +80,6 @@ export default function Evaluation(props: EvaluationProps) {
     notes = {},
     loadView,
     onRename,
-    onSaveScenario,
     loadScenario,
   } = props;
   const theme = useTheme();
@@ -91,7 +88,6 @@ export default function Evaluation(props: EvaluationProps) {
   const [editNoteState, setEditNoteState] = useState({ open: false, note: "" });
   const [classPerformanceConfig, setClassPerformanceConfig] =
     useState<PLOT_CONFIG_TYPE>({});
-  const panelId = usePanelId();
   const [classPerformanceDialogConfig, setClassPerformanceDialogConfig] =
     useState<PLOT_CONFIG_DIALOG_TYPE>(DEFAULT_BAR_CONFIG);
   const [confusionMatrixConfig, setConfusionMatrixConfig] =
@@ -1708,7 +1704,6 @@ type EvaluationProps = {
   notes: Record<string, string>;
   loadView: (type: string, params: any) => void;
   onRename: (oldName: string, newName: string) => void;
-  onSaveScenario: (scenario: any) => void;
 };

 function ColorSquare(props: { color: string }) {

app/packages/core/src/plugins/SchemaIO/components/NativeModelEvaluationView/index.tsx

Lines changed: 1 addition & 5 deletions
@@ -32,7 +32,6 @@ export default function NativeModelEvaluationView(props) {
     load_view,
     rename_evaluation,
     delete_evaluation,
-    on_save_scenario,
     load_scenario,
   } = view;
   const {
@@ -66,7 +65,7 @@ export default function NativeModelEvaluationView(props) {
   const [showCTA, setShowCTA] = React.useState(false);
   const onEvaluate = useCallback(() => {
     if (constants.IS_APP_MODE_FIFTYONE) {
-      // setShowCTA(true);
+      setShowCTA(true);
     } else {
       triggerEvent(on_evaluate_model);
     }
@@ -166,9 +165,6 @@ export default function NativeModelEvaluationView(props) {
              triggerEvent(load_view, { type, options });
            }}
            onRename={onRename}
-           onSaveScenario={(scenario: any) => {
-             triggerEvent(on_save_scenario, { scenario });
-           }}
          />
        )}
        {page === "overview" &&

plugins/panels/model_evaluation/__init__.py

Lines changed: 3 additions & 185 deletions
@@ -528,9 +528,9 @@ def load_pending_evaluations(self, ctx, skip_update=False):
                     }
                 )
             if update_store:
-                pending_evaluations_in_store[dataset_id] = (
-                    updated_pending_evaluations_for_dataset_in_stored
-                )
+                pending_evaluations_in_store[
+                    dataset_id
+                ] = updated_pending_evaluations_for_dataset_in_stored
         store.set("pending_evaluations", pending_evaluations_in_store)
         ctx.panel.set_data("pending_evaluations", pending_evaluations)

@@ -782,52 +782,6 @@ def load_view(self, ctx):
         if view is not None:
             ctx.ops.set_view(view)

-    def save_scenario(self, ctx):
-        # callback
-        pass
-
-    def extract_save_scenario_params(self, ctx):
-        print("extract_save_scenario_params", ctx.params)
-        params = ctx.params.get("scenario", {}).get("subset", {})
-        scenario_name = params.get("scenario_name", None)
-
-        if scenario_name is None:
-            raise ValueError("No scenario name provided")
-
-        scenario_type = params.get("scenario_type", None)
-        if scenario_type is None:
-            raise ValueError("No scenario type provided")
-
-        # TODO: if name exists, show alert and halt
-
-        # custom code handling
-        custom_code_expression = None
-        if scenario_type == "custom_code":
-            custom_code_expression = (
-                params.get("custom_code_stack", {})
-                .get("body_stack", {})
-                .get("custom_code", None)
-            )
-
-            if custom_code_expression is None:
-                raise ValueError("No custom code expression provided")
-
-        # TODO: others handling
-        sample_field_values = params.get("sample_field_values", None)
-        label_attribute_values = params.get("label_attribute_values", None)
-        saved_views = params.get("saved_views_values", None)
-
-        return {
-            "name": scenario_name,
-            "scenario_type": scenario_type,
-            "saved_views": saved_views,
-            "sample_field": params.get("sample_field", None),
-            "scenario_field": params.get("scenario_field", None),
-            "custom_code_expression": custom_code_expression,
-            "sample_field_values": sample_field_values,
-            "label_attribute_values": label_attribute_values,
-        }
-
     def load_compare_evaluation_results(self, ctx):
         base_model_key = (
             ctx.params.get("panel_state", {}).get("view", {}).get("key", None)
@@ -851,140 +805,6 @@ def load_compare_evaluation_results(self, ctx):
             eval_b_results,
         )

-    def on_save_scenario(self, ctx):
-        label_attrs_classes = None
-
-        params = self.extract_save_scenario_params(ctx)
-        (
-            eval_a_key,
-            eval_a_results,
-            eval_b_key,
-            eval_b_results,
-        ) = self.load_compare_evaluation_results(ctx)
-
-        graph_data = {
-            eval_a_key: {
-                "performance": {},
-                "confusion_matrix": {},
-            },
-            eval_b_key: {
-                "performance": {},
-                "confusion_matrix": {},
-            },
-        }
-
-        scenario_type = params.get("scenario_type", None)
-        sample_field_values = params.get("sample_field_values", None)
-
-        # I. Custom code
-        if scenario_type == "custom_code":
-            custom_code_expression = params.get("custom_code_expression", None)
-            if custom_code_expression is None:
-                raise ValueError("No code expression provided")
-
-            # TODO:
-            # subset_def = dict(type="code", code=custom_code_expression)
-
-            # print("subset_def", subset_def)
-            # with eval_a_results.use_subset(subset_def):
-            #     graph_data[eval_a_key]["performance"][
-            #         "custom"
-            #     ] = eval_a_results.metrics()
-
-            # with eval_b_results.use_subset(subset_def):
-            #     graph_data[eval_b_key]["performance"][
-            #         "custom"
-            #     ] = eval_b_results.metrics()
-
-        # II. Saved views
-        elif scenario_type == "saved_views":
-            saved_views = params.get("saved_views", None)
-
-            for saved_view in saved_views:
-                subset_def = dict(
-                    type="view",
-                    view=saved_view,
-                )
-
-                # Graph I data - Model Performance
-                with eval_a_results.use_subset(subset_def):
-                    graph_data[eval_a_key]["performance"][
-                        saved_view
-                    ] = eval_a_results.metrics()
-                with eval_b_results.use_subset(subset_def):
-                    graph_data[eval_b_key]["performance"][
-                        saved_view
-                    ] = eval_b_results.metrics()
-
-        # III. Sample Fields
-        elif scenario_type == "sample_field":
-            scenario_field = params.get("scenario_field", None)
-            if scenario_field is None:
-                raise ValueError("No scenario field provided")
-
-            # case sample fields: tags
-            if scenario_field == "tags":
-                field_paths = list(sample_field_values.keys())
-
-                for field_path in field_paths:
-                    if sample_field_values[field_path] is True:
-                        subset_def = dict(
-                            type="field",
-                            field=scenario_field,
-                            value=field_path,
-                        )
-
-                        # Graph I data - Model Performance
-                        with eval_a_results.use_subset(subset_def):
-                            graph_data[eval_a_key]["performance"][
-                                field_path
-                            ] = eval_a_results.metrics()
-                        with eval_b_results.use_subset(subset_def):
-                            graph_data[eval_b_key]["performance"][
-                                field_path
-                            ] = eval_b_results.metrics()
-
-            # TODO: case continuous fields: confidence
-
-            # TODO: case discrete fields: < 100 categories
-
-        # IV. Label Attributes
-        elif scenario_type == "label_attribute":
-            label_attrs_map = params.get("label_attribute_values", {})
-            label_attrs_classes = list(label_attrs_map.keys()) or []
-
-            # Graph I data - Model Performance
-            for label in label_attrs_classes:
-                subset_def = dict(
-                    type="attribute",
-                    field="label",  # TODO
-                    value=label,
-                )
-                with eval_a_results.use_subset(subset_def):
-                    graph_data[eval_a_key]["performance"][
-                        label
-                    ] = eval_a_results.metrics()
-                with eval_b_results.use_subset(subset_def):
-                    graph_data[eval_b_key]["performance"][
-                        label
-                    ] = eval_b_results.metrics()
-
-        # Graph II data - Prediction Statistics
-        # TODO
-
-        # Graph III data - Confusion Metrics
-        graph_data[eval_a_key]["confusion_matrix"] = (
-            eval_a_results.confusion_matrix(classes=label_attrs_classes)
-        )
-        graph_data[eval_b_key]["confusion_matrix"] = (
-            eval_b_results.confusion_matrix(classes=label_attrs_classes)
-        )
-
-        print("graph_data", graph_data)
-        # TODO: save the subset
-
-        # ctx.panel.set_state("evaluations_scenario", graph_data)
-
     def get_subset_def_data(self, ctx, info, results, subset_def):
         with results.use_subset(subset_def):
             metrics = results.metrics()
@@ -1028,7 +848,6 @@ def load_scenario(self, ctx):
         if scenario_id:
             scenario = self.get_scenario(ctx, eval_id, scenario_id)
            scenario_data = self.get_scenario_data(ctx, scenario)
-            print("scenario_data", scenario_data)
            ctx.panel.set_data(f"scenario_{scenario_id}", scenario_data)

     def render(self, ctx):
@@ -1047,7 +866,6 @@ def render(self, ctx):
                 load_view=self.load_view,
                 rename_evaluation=self.rename_evaluation,
                 delete_evaluation=self.delete_evaluation,
-                on_save_scenario=self.on_save_scenario,
                 load_scenario=self.load_scenario,
             ),
         )
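
For reference, the removed on_save_scenario helper and the retained get_subset_def_data method share the same pattern: scope an evaluation results object to a subset, then compute metrics inside that scope. Below is a minimal sketch of that pattern, assuming only what the diff above shows (a results object exposing use_subset() and metrics()); the helper name compute_subset_metrics is hypothetical and introduced here for illustration.

# Minimal sketch of the subset-evaluation pattern used in the diff above.
# Assumes a FiftyOne evaluation results object that exposes use_subset()
# and metrics(), as seen in get_subset_def_data(); the helper name is
# hypothetical.
def compute_subset_metrics(results, subset_def):
    # Temporarily restrict the evaluation results to the given subset,
    # then compute metrics within that scope
    with results.use_subset(subset_def):
        return results.metrics()

# Example usage, mirroring the removed "saved_views" branch:
# metrics = compute_subset_metrics(eval_a_results, dict(type="view", view=saved_view))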
