diff --git a/IRIS.mlapp b/IRIS.mlapp
index ef041a4..e4f6849 100644
Binary files a/IRIS.mlapp and b/IRIS.mlapp differ
diff --git a/Scripts/htmls/giphy.gif b/Scripts/htmls/giphy.gif
new file mode 100644
index 0000000..dc802ad
Binary files /dev/null and b/Scripts/htmls/giphy.gif differ
diff --git a/Scripts/htmls/stat.html b/Scripts/htmls/stat.html
new file mode 100644
index 0000000..bd29b67
--- /dev/null
+++ b/Scripts/htmls/stat.html
@@ -0,0 +1,21 @@
[markup of this 21-line page was stripped in extraction; only the image alt text "Paris" survives]
diff --git a/Scripts/htmls/stat2.gif b/Scripts/htmls/stat2.gif
new file mode 100644
index 0000000..76ddd21
Binary files /dev/null and b/Scripts/htmls/stat2.gif differ
diff --git a/Scripts/htmls/stat2.html b/Scripts/htmls/stat2.html
new file mode 100644
index 0000000..aeab4f3
--- /dev/null
+++ b/Scripts/htmls/stat2.html
@@ -0,0 +1,21 @@
[markup of this 21-line page was stripped in extraction; only the image alt text "Paris" survives]
diff --git a/Scripts/htmls/statistical.html b/Scripts/htmls/statistical.html
new file mode 100644
index 0000000..959081f
--- /dev/null
+++ b/Scripts/htmls/statistical.html
@@ -0,0 +1,29 @@
[markup of this 29-line page was stripped in extraction; the surviving text is a static mock-up of the t-test report: the heading "Ttest Statistical Result", the subheading "One-sample ttest results", a table with columns Feature / p-val / <0.05 Hypothesis / tstat / Degrees of Freedom / Standard Deviation, and one sample row whose flattened cells read "10001TomM30"]
diff --git a/Scripts/models/BaggedTrees.asv b/Scripts/models/BaggedTrees.asv
new file mode 100644
index 0000000..14d6bc1
--- /dev/null
+++ b/Scripts/models/BaggedTrees.asv
@@ -0,0 +1,212 @@
+
+function [trainedClassifier, validationAccuracy] = CoarseKNNS(trainingData,response2,Folds,HoldOut,classt,categoricalVal)
+% [trainedClassifier, validationAccuracy] = trainClassifier(trainingData)
+% Returns a trained classifier and its accuracy. This code recreates the
+% classification model trained in Classification Learner app. Use the
+% generated code to automate training the same model with new data, or to
+% learn how to programmatically train models.
+%
+response2
+% Input:
+%      trainingData: A matrix with the same number of columns and data type
+%      as the matrix imported into the app.
+%
+% Output:
+%      trainedClassifier: A struct containing the trained classifier. The
+%      struct contains various fields with information about the trained
+%      classifier.
+%
+%      trainedClassifier.predictFcn: A function to make predictions on new
+%      data.
+%
+%      validationAccuracy: A double containing the accuracy in percent. In
+%      the app, the History list displays this overall accuracy score for
+%      each model.
+%
+% Use the code to train the model with new data. To retrain your
+% classifier, call the function from the command line with your original
+% data or new data as the input argument trainingData.
+%
+% For example, to retrain a classifier trained with the original data set
+% T, enter:
+%   [trainedClassifier, validationAccuracy] = trainClassifier(T)
+%
+% To make predictions with the returned 'trainedClassifier' on new data T2,
+% use
+%   yfit = trainedClassifier.predictFcn(T2)
+%
+% T2 must be a matrix containing only the predictor columns used for
+% training. For details, enter:
+%   trainedClassifier.HowToPredict
+
+% Auto-generated by MATLAB on 31-May-2020 03:30:11
+
+
+% Extract predictors and response
+% This code processes the data into the right shape for training the
+% model.
+% Convert input to table
+summary(trainingData)
+
+inputTable=trainingData;
+
+predictorsInd=(1:size(trainingData,2));
+
+predictorsInd(response2)=[];
+predictorNames = trainingData.Properties.VariableNames(predictorsInd);
+predictors = inputTable(:, predictorNames);
+response = inputTable.(inputTable.Properties.VariableNames{response2});
+
+isCategoricalPredictor = categoricalVal;
+classes=unique(response(~isnan(response)));
+% This code specifies all the classifier options and trains the classifier.
+
+template = templateTree(...
+    'MaxNumSplits', 64);
+classificationKNN = fitcensemble(...
+    predictors, ...
+    response, ...
+    'Method', 'Bag', ...
+    'NumLearningCycles', 30, ...
+    'Learners', template, ...
+    'ClassNames', classes);
+
+% Create the result struct with predict function
+predictorExtractionFcn = @(y) y(:, predictorNames);
+knnPredictFcn = @(x) predict(classificationKNN, x);
+trainedClassifier.predictFcn = @(x) knnPredictFcn(predictorExtractionFcn(x));
+
+% Add additional fields to the result struct
+trainedClassifier.Classification = classificationKNN;
+trainedClassifier.About = 'This struct is a trained model exported from Classification Learner R2020a.';
+trainedClassifier.HowToPredict = sprintf('To make predictions on a new predictor column matrix, X, use: \n yfit = c.predictFcn(X) \nreplacing ''c'' with the name of the variable that is this struct, e.g. ''trainedModel''. \n \nX must contain exactly 13 columns because this model was trained using 13 predictors. \nX must contain only predictor columns in exactly the same order and format as your training \ndata. Do not include the response column or any columns you did not import into the app. \n \nFor more information, see How to predict using an exported model.');
+
+% Extract predictors and response
+% This code processes the data into the right shape for training the
+% model.
+% Convert input to table
+inputTable=trainingData;
+predictorsInd=(1:size(trainingData,2));
+predictorsInd(response2)=[];
+predictorNames = trainingData.Properties.VariableNames(predictorsInd);
+predictors = inputTable(:, predictorNames)
+response = inputTable.(string(inputTable.Properties.VariableNames{response2}));
+response=response(~isnan(response));
+% Perform cross-validation
+
+if HoldOut>0
+disp("Holdout method is using with the value of '"+num2str(HoldOut)+"'")
+cvp = cvpartition(response, 'Holdout', HoldOut);
+trainingPredictors = predictors(cvp.training, :);
+trainingResponse = response(cvp.training, :);
+trainingIsCategoricalPredictor = isCategoricalPredictor;
+
+template = templateTree(...
+    'MaxNumSplits', 64);
+classificationKNN = fitcensemble(...
+    trainingPredictors, ...
+    trainingResponse, ...
+    'Method', 'Bag', ...
+    'NumLearningCycles', 30, ...
+    'Learners', template, ...
+    'ClassNames', classes);
+
+% Create the result struct with predict function
+predictorExtractionFcn = @(y) y(:, predictorNames);
+knnPredictFcn = @(x) predict(classificationKNN, x);
+trainedClassifier.predictFcn = @(x) knnPredictFcn(predictorExtractionFcn(x));
+
+% Add additional fields to the result struct
+trainedClassifier.Classification = classificationKNN;
+trainedClassifier.About = 'This struct is a trained model exported from Classification Learner R2020a.';
+trainedClassifier.HowToPredict = sprintf('To make predictions on a new predictor column matrix, X, use: \n yfit = c.predictFcn(X) \nreplacing ''c'' with the name of the variable that is this struct, e.g. ''trainedModel''. \n \nX must contain exactly 13 columns because this model was trained using 13 predictors. \nX must contain only predictor columns in exactly the same order and format as your training \ndata. Do not include the response column or any columns you did not import into the app. \n \nFor more information, see How to predict using an exported model.');
+
+% Extract predictors and response
+% This code processes the data into the right shape for training the
+% model.
+% Convert input to table
+inputTable=trainingData;
+predictorsInd=(1:size(trainingData,2));
+predictorsInd(response2)=[];
+predictorNames = trainingData.Properties.VariableNames(predictorsInd);
+predictors = inputTable(:, predictorNames)
+response = inputTable.(string(inputTable.Properties.VariableNames{response2}));
+response=response(~isnan(response));
+% Perform cross-validation
+
+predictors = predictors(cvp.test, :);
+response = response(cvp.test, :);
+[validationPredictions, validationScores] = trainedClassifier.predictFcn(predictors);
+correctPredictions = (validationPredictions == response);
+validationAccuracy = sum(correctPredictions)/length(correctPredictions);
+
+else
+disp("K-Fold method is using with '"+num2str(Folds)+"' folds")
+
+partitionedModel = crossval(trainedClassifier.Classification, 'KFold', Folds);
+% Compute validation predictions
+
+[validationPredictions, validationScores] = kfoldPredict(partitionedModel);
+% Compute validation accuracy;
+validationAccuracy = 1 - kfoldLoss(partitionedModel, 'LossFun', 'ClassifError');
+disp("resp")
+
+end
+
+ygt=response;
+ypr=validationPredictions;
+for i=classes'
+    TP=sum((ygt==i).*(ypr==i))
+    FN=sum((ygt==i).*~(ypr==i))
+    FP=sum(~(ygt==i).*(ypr==i))
+    TN=sum(~(ygt==i).*~(ypr==i))
+    cmVals{i+1}=[TP FN FP TN]
+
+    Sens{i+1}=(TP)/(TP+FN);
+    Specificity{i+1}=(TN)/(FP+TN);
+
+end
+trainedClassifier.RequiredVariables=predictorNames;
+try
+[X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+trainedClassifier.plots.AUC=AUC;
+trainedClassifier.plots.OPTROCPT=OPTROCPT;
+trainedClassifier.plots.T=T;
+trainedClassifier.plots.SUBY=SUBY;
+trainedClassifier.plots.SUBYNAMES=SUBYNAMES;
+trainedClassifier.plots.X=X;
+trainedClassifier.plots.Y=Y;
+trainedClassifier.plots.Ygt=response;
+trainedClassifier.plots.Ypr=validationPredictions;
+trainedClassifier.plots.sensitivity=Sens;
+trainedClassifier.plots.specificity=Specificity;
+trainedClassifier.plots.cmVals=cmVals;
+trainedClassifier.plots.Accuracy=validationAccuracy;
+trainedClassifier.classes=classt;
+
+end
+
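
(The .asv file above is a MATLAB autosave snapshot of BaggedTrees.m saved mid-edit: it still carries the CoarseKNNS function name, echoes response2 inside the help block, and its final try block has no catch.) For orientation, a hypothetical call of one of these generated trainers, assuming a table T whose third column holds a numeric 0/1 response, no categorical predictors, and 5-fold cross-validation (HoldOut = 0 selects the K-fold branch):

    T = readtable('features.csv');        % assumed input: predictors and response in one table
    categoricalVal = false(1, width(T));  % assumption: no categorical predictors
    [mdl, acc] = BaggedTrees(T, 3, 5, 0, {'class 0','class 1'}, categoricalVal);
    yfit = mdl.predictFcn(T);             % predictFcn subsets T to the required predictors itself
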
diff --git a/Scripts/models/BaggedTrees.m b/Scripts/models/BaggedTrees.m
index 20c90d4..ce286d0 100644
--- a/Scripts/models/BaggedTrees.m
+++ b/Scripts/models/BaggedTrees.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/BoostedTrees.m b/Scripts/models/BoostedTrees.m
index 571a7b2..aef9c18 100644
--- a/Scripts/models/BoostedTrees.m
+++ b/Scripts/models/BoostedTrees.m
@@ -189,8 +189,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CoarseGaussianSVM.m b/Scripts/models/CoarseGaussianSVM.m
index 059972c..e471b4f 100644
--- a/Scripts/models/CoarseGaussianSVM.m
+++ b/Scripts/models/CoarseGaussianSVM.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CoarseKNNS.m b/Scripts/models/CoarseKNNS.m
index 367aafc..c3ccdc4 100644
--- a/Scripts/models/CoarseKNNS.m
+++ b/Scripts/models/CoarseKNNS.m
@@ -187,7 +187,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CoarseTreee.m b/Scripts/models/CoarseTreee.m
index 3d26b2e..977c96e 100644
--- a/Scripts/models/CoarseTreee.m
+++ b/Scripts/models/CoarseTreee.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CosineKNNS.m b/Scripts/models/CosineKNNS.m
index 6af0bd6..de695d1 100644
--- a/Scripts/models/CosineKNNS.m
+++ b/Scripts/models/CosineKNNS.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CubicKNNS.m b/Scripts/models/CubicKNNS.m
index 561ca37..ab871d1 100644
--- a/Scripts/models/CubicKNNS.m
+++ b/Scripts/models/CubicKNNS.m
@@ -193,8 +193,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/CubicSVM.m b/Scripts/models/CubicSVM.m
index 5babd51..21742b8 100644
--- a/Scripts/models/CubicSVM.m
+++ b/Scripts/models/CubicSVM.m
@@ -203,8 +203,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/DefaultModelIDHMut.m b/Scripts/models/DefaultModelIDHMut.m
index 2cd084b..208c6af 100644
--- a/Scripts/models/DefaultModelIDHMut.m
+++ b/Scripts/models/DefaultModelIDHMut.m
@@ -110,8 +110,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/DefaultModelIDHMut2.m b/Scripts/models/DefaultModelIDHMut2.m
index 3dc2d27..1a96019 100644
--- a/Scripts/models/DefaultModelIDHMut2.m
+++ b/Scripts/models/DefaultModelIDHMut2.m
@@ -135,8 +135,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
-[X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(validationResponse,validationPredictions,1);
+try
+[X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/Discriminant.m b/Scripts/models/Discriminant.m
index 4f9ae04..f1ca1e0 100644
--- a/Scripts/models/Discriminant.m
+++ b/Scripts/models/Discriminant.m
@@ -188,8 +188,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/FGSVM.m b/Scripts/models/FGSVM.m
index 4dbca92..9474e2e 100644
--- a/Scripts/models/FGSVM.m
+++ b/Scripts/models/FGSVM.m
@@ -199,8 +199,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/FineKNNS.m b/Scripts/models/FineKNNS.m
index 4d4de84..4bcd8cb 100644
--- a/Scripts/models/FineKNNS.m
+++ b/Scripts/models/FineKNNS.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/FineTree.m b/Scripts/models/FineTree.m
index 99d1015..c439f7d 100644
--- a/Scripts/models/FineTree.m
+++ b/Scripts/models/FineTree.m
@@ -185,8 +185,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/LinearSVM2.m b/Scripts/models/LinearSVM2.m
index 7301947..29c8ce0 100644
--- a/Scripts/models/LinearSVM2.m
+++ b/Scripts/models/LinearSVM2.m
@@ -199,8 +199,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/MediumGaussianSVM.m b/Scripts/models/MediumGaussianSVM.m
index 9053095..68dbbc6 100644
--- a/Scripts/models/MediumGaussianSVM.m
+++ b/Scripts/models/MediumGaussianSVM.m
@@ -196,8 +196,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/MediumKNNS.m b/Scripts/models/MediumKNNS.m
index 91f9bd0..90b57c2 100644
--- a/Scripts/models/MediumKNNS.m
+++ b/Scripts/models/MediumKNNS.m
@@ -191,8 +191,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/MediumTree.m b/Scripts/models/MediumTree.m
index 97ca832..9f57dc3 100644
--- a/Scripts/models/MediumTree.m
+++ b/Scripts/models/MediumTree.m
@@ -183,8 +183,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/QuadraticSVM.m b/Scripts/models/QuadraticSVM.m
index 3209f99..048e9f0 100644
--- a/Scripts/models/QuadraticSVM.m
+++ b/Scripts/models/QuadraticSVM.m
@@ -196,8 +196,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/RUSBoosted.m b/Scripts/models/RUSBoosted.m
index 0fba7ac..5132a25 100644
--- a/Scripts/models/RUSBoosted.m
+++ b/Scripts/models/RUSBoosted.m
@@ -187,8 +187,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/WeightedKNNS.m b/Scripts/models/WeightedKNNS.m
index 6fd5fc8..da83220 100644
--- a/Scripts/models/WeightedKNNS.m
+++ b/Scripts/models/WeightedKNNS.m
@@ -206,8 +206,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
diff --git a/Scripts/models/linearDiscriminant.m b/Scripts/models/linearDiscriminant.m
index ca56a05..b025024 100644
--- a/Scripts/models/linearDiscriminant.m
+++ b/Scripts/models/linearDiscriminant.m
@@ -187,8 +187,17 @@ end
 
 trainedClassifier.RequiredVariables=predictorNames;
-
+try
 [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(response,validationPredictions,1);
+catch
+X=-1;
+Y=-1;
+T=-1;
+AUC=-1;
+OPTROCPT=-1;
+SUBY=-1;
+SUBYNAMES=-1;
+end
 trainedClassifier.plots.AUC=AUC;
 trainedClassifier.plots.OPTROCPT=OPTROCPT;
 trainedClassifier.plots.T=T;
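
Note: every model script above receives the identical guard. perfcurve is called with positive class 1 on a numeric two-class response and throws when it cannot build a ROC curve (for example, when the validation set contains only one class), so each script now falls back to -1 sentinels that the app can test for. A minimal sketch of the shared pattern, using a hypothetical helper name safePerfcurve that is not part of this patch:

    function [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = safePerfcurve(ygt,ypr,posClass)
    % Wrap perfcurve so callers always receive outputs; -1 means "ROC unavailable".
    try
        [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = perfcurve(ygt,ypr,posClass);
    catch
        [X,Y,T,AUC,OPTROCPT,SUBY,SUBYNAMES] = deal(-1);
    end
    end

Factoring the guard into one helper like this would replace the duplicated nine-line catch block in all twenty-two model files with a single call.
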
diff --git a/Scripts/reporter.m b/Scripts/reporter.m
index 3635638..5c2cabc 100644
--- a/Scripts/reporter.m
+++ b/Scripts/reporter.m
@@ -5,6 +5,7 @@ function reporter(f,tableA,name,groups,groupsize,iterations,compact)
 % groups={{[0,1]},{[0,1]},{[0,1]},{[0,1]},{[0,1]}}
 % groupsize
 % iterations={{[50,50,50]},{[50,50,50]},{[50,50,50]},{[50,50,50]},{[50,50,50,50]}}
+makeDOMCompilable();
 import mlreportgen.dom.*;
 import mlreportgen.report.*;
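
The new makeDOMCompilable() call initializes the Report Generator DOM interface for deployed applications; reporter.m is presumably packaged into the compiled IRIS app, where this setup must run before the first mlreportgen call. If the script also runs in desktop MATLAB, a guarded variant is possible (a sketch, not part of the patch):

    if isdeployed
        makeDOMCompilable();   % needed only inside compiled applications
    end
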
diff --git a/Scripts/statistical.mlapp b/Scripts/statistical.mlapp
new file mode 100644
index 0000000..c33fa0b
Binary files /dev/null and b/Scripts/statistical.mlapp differ
diff --git a/Scripts/ttest_html.asv b/Scripts/ttest_html.asv
new file mode 100644
index 0000000..264b022
--- /dev/null
+++ b/Scripts/ttest_html.asv
@@ -0,0 +1,80 @@
+function html = ttest_html(data,group,variables,test_type)
+
+if test_type=="t-test"
+    [h,p,ci,stats]=ttest(data)
+elseif test_type=="Two-sample t-test"
+    un=unique(group)
+    un=un(~isnan(un))
+
+    indices=group==un(1)'
+    disp(indices)
+    [h,p,ci,stats]=ttest2(data(indices,:),data(~indices,:))
+elseif test_type=="Paired t-test"
+    un=unique(group)
+    un=un(~isnan(un))
+
+    indices=group==un(1)'
+    disp(indices)
+    [h,p,ci,stats]=ttest2(data(indices,:),data(~indices,:))
+elseif test_type=="One-Way ANOVA"
+    [h,p,stats]=manova1(data,group)
+end
+
+% The HTML literals below are reconstructed (the original tags were lost in
+% extraction); the cell layout follows the 'CH' placeholders consumed by the
+% strrep calls in the loop, so the exact markup may differ from the original.
+start={'<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head>'
+    '<body>'
+    '<div><b>Ttest Statistical Result</b></div>'
+    '<div>One-sample ttest results</div>'
+    '<table>'
+    '<tr>'
+    '<th>Feature</th>'
+    '<th>p-val</th>'
+    '<th>Hypothesis < 0.05</th>'
+    '<th>tstat</th>'
+    '<th>Degrees of Freedom</th>'
+    '<th>Standard Deviation</th>'
+    '</tr>'}
+
+vals={'<tr>'
+    '<td style="color:rgb(0,0,0)">CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '</tr>'}
+
+endage={'</table>'
+    '</body>'
+    '</html>'}
+
+for i=1:length(p)
+    vals2=vals
+    if p(i)<0.05
+        % significant (p<0.05): recolor the feature name and append '*'
+        vals2{2}=strrep(vals2{2},'rgb(0,0,0)','rgb(155,0,0)')
+        vals2{2}=strrep(vals2{2},'CH','CH*')
+    end
+    vals2{2}=strrep(vals2{2},'CH',variables{i})
+    vals2{3}=strrep(vals2{3},'CH',num2str(p(i),3))
+    vals2{4}=strrep(vals2{4},'CH',num2str(h(i),3))
+    vals2{5}=strrep(vals2{5},'CH',num2str(stats.tstat(i),3))
+    vals2{6}=strrep(vals2{6},'CH',num2str(stats.df(i),3))
+    vals2{7}=strrep(vals2{7},'CH',num2str(stats.sd(i),3))
+
+    start=[start;vals2];
+end
+
+start=[start;endage]
+html=join(start,' ')
+
+end
\ No newline at end of file
diff --git a/Scripts/ttest_html.m b/Scripts/ttest_html.m
new file mode 100644
index 0000000..264b022
--- /dev/null
+++ b/Scripts/ttest_html.m
@@ -0,0 +1,80 @@
+function html = ttest_html(data,group,variables,test_type)
+
+if test_type=="t-test"
+    [h,p,ci,stats]=ttest(data)
+elseif test_type=="Two-sample t-test"
+    un=unique(group)
+    un=un(~isnan(un))
+
+    indices=group==un(1)'
+    disp(indices)
+    [h,p,ci,stats]=ttest2(data(indices,:),data(~indices,:))
+elseif test_type=="Paired t-test"
+    un=unique(group)
+    un=un(~isnan(un))
+
+    indices=group==un(1)'
+    disp(indices)
+    [h,p,ci,stats]=ttest2(data(indices,:),data(~indices,:))
+elseif test_type=="One-Way ANOVA"
+    [h,p,stats]=manova1(data,group)
+end
+
+% The HTML literals below are reconstructed (the original tags were lost in
+% extraction); the cell layout follows the 'CH' placeholders consumed by the
+% strrep calls in the loop, so the exact markup may differ from the original.
+start={'<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head>'
+    '<body>'
+    '<div><b>Ttest Statistical Result</b></div>'
+    '<div>One-sample ttest results</div>'
+    '<table>'
+    '<tr>'
+    '<th>Feature</th>'
+    '<th>p-val</th>'
+    '<th>Hypothesis < 0.05</th>'
+    '<th>tstat</th>'
+    '<th>Degrees of Freedom</th>'
+    '<th>Standard Deviation</th>'
+    '</tr>'}
+
+vals={'<tr>'
+    '<td style="color:rgb(0,0,0)">CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '<td>CH</td>'
+    '</tr>'}
+
+endage={'</table>'
+    '</body>'
+    '</html>'}
+
+for i=1:length(p)
+    vals2=vals
+    if p(i)<0.05
+        % significant (p<0.05): recolor the feature name and append '*'
+        vals2{2}=strrep(vals2{2},'rgb(0,0,0)','rgb(155,0,0)')
+        vals2{2}=strrep(vals2{2},'CH','CH*')
+    end
+    vals2{2}=strrep(vals2{2},'CH',variables{i})
+    vals2{3}=strrep(vals2{3},'CH',num2str(p(i),3))
+    vals2{4}=strrep(vals2{4},'CH',num2str(h(i),3))
+    vals2{5}=strrep(vals2{5},'CH',num2str(stats.tstat(i),3))
+    vals2{6}=strrep(vals2{6},'CH',num2str(stats.df(i),3))
+    vals2{7}=strrep(vals2{7},'CH',num2str(stats.sd(i),3))
+
+    start=[start;vals2];
+end
+
+start=[start;endage]
+html=join(start,' ')
+
+end
\ No newline at end of file
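
A quick smoke test for ttest_html under assumed toy inputs (20 observations, 3 features, a binary grouping variable):

    data = randn(20,3);                         % assumed feature matrix
    group = [zeros(10,1); ones(10,1)];          % assumed 0/1 group labels
    vars = {'Feature1','Feature2','Feature3'};  % assumed column names
    html = ttest_html(data, group, vars, "Two-sample t-test");
    % join returns a 1x1 cell; html{1} holds the page markup for the app to render.

Most statements in the function omit semicolons, so intermediate results echo to the console; note also that the "Paired t-test" branch currently calls ttest2 rather than a paired test, and ttest_html.asv is just the autosave twin of ttest_html.m (both files share index 264b022).
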
', +'', +''} + + + + +for i =1:length(p) + vals2=vals + if p(i)<0.05 + + vals2{2}=strrep(vals2{2},'rgb(0,0,0)','rgb(155,0,0)') + vals2{2}=strrep(vals2{2},'CH','CH*') + + end + vals2{2}=strrep(vals2{2},'CH',variables{i}) + vals2{3}=strrep(vals2{3},'CH',num2str(p(i),3)) + vals2{4}=strrep(vals2{4},'CH',num2str(h(i),3)) + vals2{5}=strrep(vals2{5},'CH',num2str(stats.tstat(i),3)) + vals2{6}=strrep(vals2{6},'CH',num2str(stats.df(i),3)) + vals2{7}=strrep(vals2{7},'CH',num2str(stats.sd(i),3)) + + start=[start;vals2]; +end + +start=[start;endage] +html=join(start,' ') + + +end \ No newline at end of file diff --git a/icons/statistical.gif b/icons/statistical.gif new file mode 100644 index 0000000..66e4e2a Binary files /dev/null and b/icons/statistical.gif differ diff --git a/icons/statistics.png b/icons/statistics.png new file mode 100644 index 0000000..3f096dc Binary files /dev/null and b/icons/statistics.png differ