@@ -35,7 +35,7 @@ def __eval_mosi_classification(self, y_pred, y_true):
         # three classes
         y_pred_3 = np.argmax(y_pred, axis=1)
         Mult_acc_3 = accuracy_score(y_pred_3, y_true)
-        F1_score_3 = f1_score(y_pred_3, y_true, average='weighted')
+        F1_score_3 = f1_score(y_true, y_pred_3, average='weighted')
         # two classes
         y_pred = np.array([[v[0], v[2]] for v in y_pred])
         # with 0 (<= 0 or > 0)
@@ -45,14 +45,14 @@ def __eval_mosi_classification(self, y_pred, y_true):
             y_true_2.append(0 if v <= 1 else 1)
         y_true_2 = np.array(y_true_2)
         Has0_acc_2 = accuracy_score(y_pred_2, y_true_2)
-        Has0_F1_score = f1_score(y_pred_2, y_true_2, average='weighted')
+        Has0_F1_score = f1_score(y_true_2, y_pred_2, average='weighted')
         # without 0 (< 0 or > 0)
         non_zeros = np.array([i for i, e in enumerate(y_true) if e != 1])
         y_pred_2 = y_pred[non_zeros]
         y_pred_2 = np.argmax(y_pred_2, axis=1)
         y_true_2 = y_true[non_zeros]
         Non0_acc_2 = accuracy_score(y_pred_2, y_true_2)
-        Non0_F1_score = f1_score(y_pred_2, y_true_2, average='weighted')
+        Non0_F1_score = f1_score(y_true_2, y_pred_2, average='weighted')

         eval_results = {
             "Has0_acc_2": round(Has0_acc_2, 4),
@@ -103,12 +103,12 @@ def __eval_mosei_regression(self, y_pred, y_true, exclude_zero=False):
         non_zeros_binary_preds = (test_preds[non_zeros] > 0)

         non_zeros_acc2 = accuracy_score(non_zeros_binary_preds, non_zeros_binary_truth)
-        non_zeros_f1_score = f1_score(non_zeros_binary_preds, non_zeros_binary_truth, average='weighted')
+        non_zeros_f1_score = f1_score(non_zeros_binary_truth, non_zeros_binary_preds, average='weighted')

         binary_truth = (test_truth >= 0)
         binary_preds = (test_preds >= 0)
         acc2 = accuracy_score(binary_preds, binary_truth)
-        f_score = f1_score(binary_preds, binary_truth, average='weighted')
+        f_score = f1_score(binary_truth, binary_preds, average='weighted')

         eval_results = {
             "Has0_acc_2": round(acc2, 4),
@@ -164,7 +164,7 @@ def __eval_sims_regression(self, y_pred, y_true):
         mult_a2 = self.__multiclass_acc(test_preds_a2, test_truth_a2)
         mult_a3 = self.__multiclass_acc(test_preds_a3, test_truth_a3)
         mult_a5 = self.__multiclass_acc(test_preds_a5, test_truth_a5)
-        f_score = f1_score(test_preds_a2, test_truth_a2, average='weighted')
+        f_score = f1_score(test_truth_a2, test_preds_a2, average='weighted')

         eval_results = {
             "Mult_acc_2": mult_a2,