From 3ce8fef6e9de610e69a8a156a9ec2f1f1644b238 Mon Sep 17 00:00:00 2001
From: Oscar Kjell
Date: Wed, 4 Dec 2024 18:58:36 +0100
Subject: [PATCH] changing parameter name dataset_to_merge_assessments

---
 R/2_4_0_textPredict_Assess_Classify.R      | 10 +++++-----
 R/2_4_1_textPredictTextTrained.R           |  6 +++---
 R/2_4_2_textPredictImplicitMotives.R       | 18 +++++++++---------
 man/textPredict.Rd                         | 10 +++++-----
 .../test_2_6_textPredict_implicitmotives.R | 12 ++++++------
 5 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/R/2_4_0_textPredict_Assess_Classify.R b/R/2_4_0_textPredict_Assess_Classify.R
index 3ec821e7..02ed2ca1 100644
--- a/R/2_4_0_textPredict_Assess_Classify.R
+++ b/R/2_4_0_textPredict_Assess_Classify.R
@@ -43,7 +43,7 @@
 #' @param story_id (vector; only for "texttrained"-model_type) Vector of story-ids. Specify this to get story level scores (i.e., summed sentence
 #' probabilities corrected for word count). When there is both story_id and participant_id indicated, the function
 #' returns a list including both story level and person level prediction corrected for word count. (default = NULL)
-#' @param dataset_to_merge_predictions (R-object, tibble; only for "texttrained"-model_type) Insert your data here to integrate predictions to your dataset,
+#' @param dataset_to_merge_assessments (R-object, tibble; only for "texttrained"-model_type) Insert your data here to integrate predictions to your dataset,
 #' (default = NULL).
 #' @param save_embeddings (boolean; only for "texttrained"-model_type) If set to TRUE, embeddings will be saved with a unique identifier, and
 #' will be automatically opened next time textPredict is run with the same text. (default = TRUE)
@@ -110,7 +110,7 @@
 #' texts = implicit_motive_data$satisfactiontexts,
 #' model_info = "implicit_power_roberta_large_L23_v1",
 #' participant_id = implicit_motive_data$participant_id,
-#' dataset_to_merge_predictions = implicit_motive_data
+#' dataset_to_merge_assessments = implicit_motive_data
 #' )
 #'
 #' # Examine results
 #'
@@ -159,7 +159,7 @@ textPredict <- function(
 save_dir = "wd",
 save_name = "textPredict",
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 ## fine-tuned model specific parameters ##
 tokenizer_parallelism = FALSE,
@@ -270,7 +270,7 @@
 save_dir = save_dir,
 save_name = save_name,
 story_id = story_id,
- dataset_to_merge_predictions = dataset_to_merge_predictions,
+ dataset_to_merge_assessments = dataset_to_merge_assessments,
 previous_sentence = previous_sentence,
 ...)
@@ -317,7 +317,7 @@
 show_texts = show_texts,
 participant_id = participant_id,
 story_id = story_id,
- dataset_to_merge_predictions = dataset_to_merge_predictions,
+ dataset_to_merge_assessments = dataset_to_merge_assessments,
 previous_sentence = previous_sentence,
 device = device,
 )
diff --git a/R/2_4_1_textPredictTextTrained.R b/R/2_4_1_textPredictTextTrained.R
index d27b41ed..9a85c671 100644
--- a/R/2_4_1_textPredictTextTrained.R
+++ b/R/2_4_1_textPredictTextTrained.R
@@ -342,7 +342,7 @@ textReturnEmbedding <- function(
 #' @param story_id (vector; only works for implicit motives models) Vector of story-ids. Specify this to get story level scores (i.e., summed sentence
 #' probabilities corrected for word count). When there is both story_id and participant_id indicated, the function
 #' returns a list including both story level and person level prediction corrected for word count. (default = NULL)
-#' @param dataset_to_merge_predictions (tibble; only works for implicit motives models) Insert your data here to integrate predictions to your dataset,
+#' @param dataset_to_merge_assessments (tibble; only works for implicit motives models) Insert your data here to integrate predictions to your dataset,
 #' (default = NULL).
 #' @param previous_sentence (Boolean; only works for implicit motives models) If set to TRUE, word-embeddings will be averaged over the current and previous
 #' sentence per story-id. For this, both participant-id and story-id must be specified.
@@ -394,7 +394,7 @@ textReturnEmbedding <- function(
 #' texts = implicit_motive_data$satisfactiontexts,
 #' model_info = "implicit_power_roberta_large_L23_v1",
 #' participant_id = implicit_motive_data$participant_id,
-#' dataset_to_merge_predictions = implicit_motive_data
+#' dataset_to_merge_assessments = implicit_motive_data
 #' )
 #'
 #' # Examine results
 #'
@@ -436,7 +436,7 @@ textPredictTextTrained <- function(
 show_texts = FALSE,
 participant_id = NULL,
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 device = "cpu",
 ...) {
diff --git a/R/2_4_2_textPredictImplicitMotives.R b/R/2_4_2_textPredictImplicitMotives.R
index 29122fc8..c4040dad 100644
--- a/R/2_4_2_textPredictImplicitMotives.R
+++ b/R/2_4_2_textPredictImplicitMotives.R
@@ -348,7 +348,7 @@ implicit_motives_results <- function(
 # If no merge was successful, show the message
 if (!merge_success) {
 message(colourise(
- "Note: dataset_to_merge_predictions does not have the same number of rows as the predictions and cannot be merged.\n",
+ "Note: dataset_to_merge_assessments does not have the same number of rows as the predictions and cannot be merged.\n",
 "brown"
 ))
 }
@@ -509,7 +509,7 @@ get_implicit_model_info <- function(
 #' @param story_id (vector; only works for implicit motives models) Vector of story-ids. Specify this to get story level scores (i.e., summed sentence
 #' probabilities corrected for word count). When there is both story_id and participant_id indicated, the function
 #' returns a list including both story level and person level prediction corrected for word count. (default = NULL)
-#' @param dataset_to_merge_predictions (tibble; only works for implicit motives models) Insert your data here to integrate predictions to your dataset,
+#' @param dataset_to_merge_assessments (tibble; only works for implicit motives models) Insert your data here to integrate predictions to your dataset,
 #' (default = NULL).
 #' @param previous_sentence (Boolean; only works for implicit motives models) If set to TRUE, word-embeddings will be averaged over the current and previous
 #' sentence per story-id. For this, both participant-id and story-id must be specified.
@@ -561,7 +561,7 @@ get_implicit_model_info <- function(
 #' texts = implicit_motive_data$satisfactiontexts,
 #' model_info = "implicit_power_roberta_large_L23_v1",
 #' participant_id = implicit_motive_data$participant_id,
-#' dataset_to_merge_predictions = implicit_motive_data
+#' dataset_to_merge_assessments = implicit_motive_data
 #' )
 #'
 #' # Examine results
 #'
@@ -604,7 +604,7 @@ textPredictImplicitMotives <- function(
 show_texts = FALSE,
 participant_id = NULL,
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 device = "cpu",
 ...) {
@@ -628,10 +628,10 @@ textPredictImplicitMotives <- function(
 model_name <- model_info
 lower_case_model <- as.character(tolower(model_name))
- if (is.null(participant_id) & is.null(story_id) & !is.null(dataset_to_merge_predictions)){
- message(colourise(paste("Note: The 'texts' were not at the sentence level, and while dataset_to_merge_predictions",
+ if (is.null(participant_id) & is.null(story_id) & !is.null(dataset_to_merge_assessments)){
+ message(colourise(paste("Note: The 'texts' were not at the sentence level, and while dataset_to_merge_assessments",
 " was provided, participant_id and story_id were missing. ",
- "The function treated each row_id as a participant_id when merging assessments into dataset_to_merge_predictions. \n", sep=""),
+ "The function treated each row_id as a participant_id when merging assessments into dataset_to_merge_assessments. \n", sep=""),
 "purple"))
 use_row_id_name <- TRUE
@@ -681,7 +681,7 @@ textPredictImplicitMotives <- function(
 show_texts = show_texts,
 participant_id = participant_id,
 story_id = story_id,
- dataset_to_merge_predictions = dataset_to_merge_predictions,
+ dataset_to_merge_assessments = dataset_to_merge_assessments,
 previous_sentence = previous_sentence,
 device = device
 )
@@ -731,7 +731,7 @@ textPredictImplicitMotives <- function(
 story_id = story_id,
 predicted_scores2 = predicted_scores2,
 texts = texts,
- dataset = dataset_to_merge_predictions,
+ dataset = dataset_to_merge_assessments,
 lower_case_model = lower_case_model
 )
diff --git a/man/textPredict.Rd b/man/textPredict.Rd
index 00e48398..4e4b6825 100644
--- a/man/textPredict.Rd
+++ b/man/textPredict.Rd
@@ -26,7 +26,7 @@ textPredict(
 save_dir = "wd",
 save_name = "textPredict",
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 tokenizer_parallelism = FALSE,
 logging_level = "error",
@@ -57,7 +57,7 @@ textAssess(
 save_dir = "wd",
 save_name = "textPredict",
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 tokenizer_parallelism = FALSE,
 logging_level = "error",
@@ -88,7 +88,7 @@ textClassify(
 save_dir = "wd",
 save_name = "textPredict",
 story_id = NULL,
- dataset_to_merge_predictions = NULL,
+ dataset_to_merge_assessments = NULL,
 previous_sentence = FALSE,
 tokenizer_parallelism = FALSE,
 logging_level = "error",
@@ -163,7 +163,7 @@ to model_info.}
 probabilities corrected for word count). When there is both story_id and participant_id indicated, the function
 returns a list including both story level and person level prediction corrected for word count. (default = NULL)}
-\item{dataset_to_merge_predictions}{(R-object, tibble; only for "texttrained"-model_type) Insert your data here to integrate predictions to your dataset,
+\item{dataset_to_merge_assessments}{(R-object, tibble; only for "texttrained"-model_type) Insert your data here to integrate predictions to your dataset,
 (default = NULL).}
 \item{previous_sentence}{(boolean; only for "texttrained"-model_type) If set to TRUE, word-embeddings will be averaged over the current and previous
@@ -240,7 +240,7 @@ implicit_motives <- textPredict(
 texts = implicit_motive_data$satisfactiontexts,
 model_info = "implicit_power_roberta_large_L23_v1",
 participant_id = implicit_motive_data$participant_id,
- dataset_to_merge_predictions = implicit_motive_data
+ dataset_to_merge_assessments = implicit_motive_data
 )
 # Examine results
diff --git a/tests/testthat/test_2_6_textPredict_implicitmotives.R b/tests/testthat/test_2_6_textPredict_implicitmotives.R
index 7a6e1708..1493cab8 100644
--- a/tests/testthat/test_2_6_textPredict_implicitmotives.R
+++ b/tests/testthat/test_2_6_textPredict_implicitmotives.R
@@ -126,7 +126,7 @@ test_that("textPredict Implicit motives", {
 texts = PSE_stories_participant_level$stories,
 model_info = "implicitpower_roberta23_nilsson2024",
 participant_id = PSE_stories_participant_level$Participant_ID,
- dataset_to_merge_predictions = PSE_stories_participant_level
+ dataset_to_merge_assessments = PSE_stories_participant_level
 )
 testthat::expect_that(predictions_participant_1, testthat::is_a("list"))
 testthat::expect_equal(length(predictions_participant_1), 3)
@@ -144,7 +144,7 @@
 model_info = "implicitpower_roberta23_nilsson2024",
 show_texts = T
 # participant_id = PSE_stories_participant_level$Participant_ID,
- # dataset_to_merge_predictions = PSE_stories_participant_level
+ # dataset_to_merge_assessments = PSE_stories_participant_level
 )
 testthat::expect_that(predictions_participant_2, testthat::is_a("tbl_df"))
 testthat::expect_equal(length(predictions_participant_2), 4)
@@ -156,7 +156,7 @@
 model_info = "implicitachievement_roberta23_nilsson2024",
 participant_id = PSE_stories_story_level$Participant_ID,
 story_id = PSE_stories_story_level$story_id,
- dataset_to_merge_predictions = PSE_stories_story_level
+ dataset_to_merge_assessments = PSE_stories_story_level
 )
 testthat::expect_that(predictions_story_1, testthat::is_a("list"))
 testthat::expect_equal(length(predictions_story_1), 4)
@@ -170,7 +170,7 @@
 model_info = "implicitachievement_roberta23_nilsson2024",
 #participant_id = PSE_stories_story_level$Participant_ID,
 #story_id = PSE_stories_story_level$story_id,
- dataset_to_merge_predictions = PSE_stories_story_level
+ dataset_to_merge_assessments = PSE_stories_story_level
 )
 testthat::expect_that(predictions_story_2, testthat::is_a("list"))
 testthat::expect_equal(length(predictions_story_2), 3)
@@ -182,7 +182,7 @@
 model_info = "implicitaffiliation_roberta23_nilsson2024",
 participant_id = PSE_stories_sentence_level$Participant_ID,
 story_id = PSE_stories_sentence_level$story_id,
- dataset_to_merge_predictions = PSE_stories_sentence_level
+ dataset_to_merge_assessments = PSE_stories_sentence_level
 )
 testthat::expect_that(predictions_sentence_1, testthat::is_a("list"))
 testthat::expect_equal(length(predictions_sentence_1), 4)
@@ -195,7 +195,7 @@
 model_info = "implicitaffiliation_roberta23_nilsson2024",
 #participant_id = PSE_stories_sentence_level$Participant_ID,
 #story_id = PSE_stories_sentence_level$story_id,
- dataset_to_merge_predictions = PSE_stories_sentence_level
+ dataset_to_merge_assessments = PSE_stories_sentence_level
 )
 testthat::expect_that(predictions_sentence_2, testthat::is_a("list"))
 testthat::expect_equal(length(predictions_sentence_2), 3)