This document contains the supplementary analyses for Punishment Study 2.1.2.
# Required packages ----
# CRAN packages used throughout the supplementary analyses.
required_packages <- c(
  "tidyverse",     # data wrangling + ggplot2
  "psych",         # descriptive statistics
  "cocor",         # comparing dependent correlations (Steiger's Z)
  "boot",          # bootstrap resampling and CIs
  "interactions",  # interaction / simple-slopes plots
  "broom",         # tidying model output
  "knitr",         # report rendering
  "car"            # regression diagnostics
)
# Install missing packages (explicit length comparison, not bare truthiness)
new_packages <- required_packages[!(required_packages %in% installed.packages()[,"Package"])]
if (length(new_packages) > 0) install.packages(new_packages)
# Load packages; invisible() suppresses the long attached-package listing
# that a bare lapply() would print into the report.
invisible(lapply(required_packages, library, character.only = TRUE))
## [[1]]
## [1] "lubridate" "forcats" "stringr" "dplyr" "purrr" "readr"
## [7] "tidyr" "tibble" "ggplot2" "tidyverse" "stats" "graphics"
## [13] "grDevices" "utils" "datasets" "methods" "base"
##
## [[2]]
## [1] "psych" "lubridate" "forcats" "stringr" "dplyr" "purrr"
## [7] "readr" "tidyr" "tibble" "ggplot2" "tidyverse" "stats"
## [13] "graphics" "grDevices" "utils" "datasets" "methods" "base"
##
## [[3]]
## [1] "cocor" "psych" "lubridate" "forcats" "stringr" "dplyr"
## [7] "purrr" "readr" "tidyr" "tibble" "ggplot2" "tidyverse"
## [13] "stats" "graphics" "grDevices" "utils" "datasets" "methods"
## [19] "base"
##
## [[4]]
## [1] "boot" "cocor" "psych" "lubridate" "forcats" "stringr"
## [7] "dplyr" "purrr" "readr" "tidyr" "tibble" "ggplot2"
## [13] "tidyverse" "stats" "graphics" "grDevices" "utils" "datasets"
## [19] "methods" "base"
##
## [[5]]
## [1] "interactions" "boot" "cocor" "psych" "lubridate"
## [6] "forcats" "stringr" "dplyr" "purrr" "readr"
## [11] "tidyr" "tibble" "ggplot2" "tidyverse" "stats"
## [16] "graphics" "grDevices" "utils" "datasets" "methods"
## [21] "base"
##
## [[6]]
## [1] "broom" "interactions" "boot" "cocor" "psych"
## [6] "lubridate" "forcats" "stringr" "dplyr" "purrr"
## [11] "readr" "tidyr" "tibble" "ggplot2" "tidyverse"
## [16] "stats" "graphics" "grDevices" "utils" "datasets"
## [21] "methods" "base"
##
## [[7]]
## [1] "knitr" "broom" "interactions" "boot" "cocor"
## [6] "psych" "lubridate" "forcats" "stringr" "dplyr"
## [11] "purrr" "readr" "tidyr" "tibble" "ggplot2"
## [16] "tidyverse" "stats" "graphics" "grDevices" "utils"
## [21] "datasets" "methods" "base"
##
## [[8]]
## [1] "car" "carData" "knitr" "broom" "interactions"
## [6] "boot" "cocor" "psych" "lubridate" "forcats"
## [11] "stringr" "dplyr" "purrr" "readr" "tidyr"
## [16] "tibble" "ggplot2" "tidyverse" "stats" "graphics"
## [21] "grDevices" "utils" "datasets" "methods" "base"
# Global display options: no scientific notation, 4 significant digits.
options(scipen = 999)
options(digits = 4)
# Load the cleaned data from main analysis
# UPDATE PATH AS NEEDED
data_path <- "punishment_212_cleaned_data.csv"
# Fail fast with an actionable message instead of read.csv's cryptic
# "cannot open connection" error when the path is wrong.
if (!file.exists(data_path)) {
  stop("Cleaned data file not found: ", data_path,
       " - update the path before running.", call. = FALSE)
}
df_clean <- read.csv(data_path, stringsAsFactors = FALSE)
cat("Loaded cleaned dataset: N =", nrow(df_clean), "\n\n")
## Loaded cleaned dataset: N = 496
This analysis examines whether different operationalizations of punitiveness show similar correlation patterns with psychological correlates.
cat("=== CORRELATIONS BY PUNITIVENESS MEASURE ===\n\n")
## === CORRELATIONS BY PUNITIVENESS MEASURE ===
# Define punitiveness measures
punitiveness_measures <- c(
  "punishmore_comp",   # Punish More (2 items)
  "parsimony_comp",    # Parsimony (2 items) - note: low reliability
  "threestrikes_comp", # Three Strikes (2 items)
  "LWOP",              # Life Without Parole (single item)
  "deathpenalty",      # Death Penalty (single item)
  "Sentence_z"         # Sentence (z-scored within vignette)
)
punitiveness_labels <- c(
  "Punish More", "Parsimony", "Three Strikes",
  "LWOP", "Death Penalty", "Sentence"
)
# Define key correlates (one from each cluster)
key_correlates <- c(
  "crime_concerns_agg", # Crime Concerns cluster
  "emotions_agg",       # Emotions cluster
  "hostile_agg",        # Hostile Aggression cluster
  "personality_agg"     # Personality cluster
)
correlate_labels <- c("Crime Concerns", "Emotions", "Hostile Aggression", "Personality")
# Measure x correlate matrices: r values in cor_matrix, p-values in p_matrix.
cor_matrix <- matrix(NA, nrow = length(punitiveness_measures),
                     ncol = length(key_correlates))
rownames(cor_matrix) <- punitiveness_labels
colnames(cor_matrix) <- correlate_labels
p_matrix <- cor_matrix # For p-values
# seq_along() is safe for zero-length vectors, unlike 1:length().
for (i in seq_along(punitiveness_measures)) {
  for (j in seq_along(key_correlates)) {
    test <- cor.test(df_clean[[punitiveness_measures[i]]],
                     df_clean[[key_correlates[j]]])
    cor_matrix[i, j] <- test$estimate
    p_matrix[i, j] <- test$p.value
  }
}
# Format for display
# The "##" lines below each call preserve the knitted console output.
cat("Correlation Matrix (r values):\n")
## Correlation Matrix (r values):
# Rounded to 2 dp for readability; full precision remains in cor_matrix.
print(round(cor_matrix, 2))
## Crime Concerns Emotions Hostile Aggression Personality
## Punish More 0.28 0.58 0.62 0.59
## Parsimony 0.18 0.41 0.49 0.39
## Three Strikes 0.33 0.48 0.55 0.55
## LWOP 0.33 0.48 0.47 0.33
## Death Penalty 0.24 0.46 0.56 0.55
## Sentence 0.17 0.24 0.25 0.13
cat("\n\nP-values:\n")
##
##
## P-values:
# p-values rounded to 4 dp; entries printed as 0 are < .00005.
print(round(p_matrix, 4))
## Crime Concerns Emotions Hostile Aggression Personality
## Punish More 0.0000 0 0 0.0000
## Parsimony 0.0000 0 0 0.0000
## Three Strikes 0.0000 0 0 0.0000
## LWOP 0.0000 0 0 0.0000
## Death Penalty 0.0000 0 0 0.0000
## Sentence 0.0001 0 0 0.0046
# Append APA-style significance stars to each rounded correlation, producing a
# character matrix with the same dimnames as cor_matrix.
p_to_stars <- function(p) {
  # Vectorized: returns "***", "**", "*", or "" for each p-value.
  ifelse(p < .001, "***",
         ifelse(p < .01, "**",
                ifelse(p < .05, "*", "")))
}
sig_matrix <- matrix(
  paste0(format(round(cor_matrix, 2), nsmall = 2), p_to_stars(p_matrix)),
  nrow = nrow(cor_matrix),
  dimnames = dimnames(cor_matrix)
)
cat("\n\nFormatted Table:\n")
##
##
## Formatted Table:
# noquote() suppresses the surrounding quotes when printing the character matrix.
print(noquote(sig_matrix))
## Crime Concerns Emotions Hostile Aggression Personality
## Punish More 0.28*** 0.58*** 0.62*** 0.59***
## Parsimony 0.18*** 0.41*** 0.49*** 0.39***
## Three Strikes 0.33*** 0.48*** 0.55*** 0.55***
## LWOP 0.33*** 0.48*** 0.47*** 0.33***
## Death Penalty 0.24*** 0.46*** 0.56*** 0.55***
## Sentence 0.17*** 0.24*** 0.25*** 0.13**
cat("\nNote: * p < .05, ** p < .01, *** p < .001\n")
##
## Note: * p < .05, ** p < .01, *** p < .001
cat("\n=== VARIABILITY IN CORRELATIONS ACROSS PUNITIVENESS MEASURES ===\n\n")
##
## === VARIABILITY IN CORRELATIONS ACROSS PUNITIVENESS MEASURES ===
# For each correlate cluster, compute range of correlations across punitiveness measures
# Column-wise summaries of cor_matrix: each column is one correlate cluster,
# each row one punitiveness operationalization.
variability_summary <- data.frame(
Correlate = correlate_labels,
Min_r = apply(cor_matrix, 2, min),
Max_r = apply(cor_matrix, 2, max),
Range = apply(cor_matrix, 2, function(x) max(x) - min(x)),
Mean_r = apply(cor_matrix, 2, mean),
SD_r = apply(cor_matrix, 2, sd)
)
print(as.data.frame(variability_summary), row.names = FALSE)
## Correlate Min_r Max_r Range Mean_r SD_r
## Crime Concerns 0.1704 0.3286 0.1583 0.2549 0.06893
## Emotions 0.2415 0.5757 0.3342 0.4412 0.11132
## Hostile Aggression 0.2480 0.6238 0.3758 0.4895 0.12979
## Personality 0.1271 0.5950 0.4679 0.4236 0.17805
cat("\n\nInterpretation:\n")
##
##
## Interpretation:
cat("- Larger Range/SD indicates less consistency across punitiveness operationalizations\n")
## - Larger Range/SD indicates less consistency across punitiveness operationalizations
cat("- Parsimony (low reliability) likely contributes to variability\n")
## - Parsimony (low reliability) likely contributes to variability
cat("\n=== COMPARING PUNITIVENESS MEASURES: HOSTILE VS CRIME CONCERNS ===\n\n")
##
## === COMPARING PUNITIVENESS MEASURES: HOSTILE VS CRIME CONCERNS ===
cat("Testing whether Hostile Aggression > Crime Concerns holds for EACH punitiveness measure:\n\n")
## Testing whether Hostile Aggression > Crime Concerns holds for EACH punitiveness measure:
# One Steiger (1980) test of dependent, overlapping correlations per measure:
# is r(punitiveness, hostile) reliably larger than r(punitiveness, crime)?
# The correlates' own intercorrelation does not depend on the loop variable,
# so it is computed once, outside the loop.
r_hostile_crime <- cor(df_clean$hostile_agg, df_clean$crime_concerns_agg,
                       use = "pairwise.complete.obs")
results_list <- vector("list", length(punitiveness_measures)) # preallocated
for (i in seq_along(punitiveness_measures)) {
  pun_var <- punitiveness_measures[i]
  r_hostile <- cor(df_clean[[pun_var]], df_clean$hostile_agg,
                   use = "pairwise.complete.obs")
  r_crime <- cor(df_clean[[pun_var]], df_clean$crime_concerns_agg,
                 use = "pairwise.complete.obs")
  # n for the test: rows complete on all three variables involved.
  n <- sum(complete.cases(df_clean[, c(pun_var, "hostile_agg", "crime_concerns_agg")]))
  steiger <- cocor.dep.groups.overlap(
    r.jk = r_hostile,
    r.jh = r_crime,
    r.kh = r_hostile_crime,
    n = n,
    test = "steiger1980"
  )
  results_list[[i]] <- data.frame(
    Measure = punitiveness_labels[i],
    r_Hostile = round(r_hostile, 2),
    r_Crime = round(r_crime, 2),
    Difference = round(r_hostile - r_crime, 2),
    Z = round(steiger@steiger1980$statistic, 2),
    p = steiger@steiger1980$p.value,
    # Directional support: correct sign AND significant.
    Supported = ifelse(r_hostile > r_crime & steiger@steiger1980$p.value < .05,
                       "YES", "NO")
  )
}
steiger_results <- do.call(rbind, results_list)
steiger_results$p <- format(steiger_results$p, scientific = FALSE, digits = 4)
print(as.data.frame(steiger_results), row.names = FALSE)
## Measure r_Hostile r_Crime Difference Z p Supported
## Punish More 0.62 0.28 0.34 8.23 0.000000000000000222 YES
## Parsimony 0.49 0.18 0.31 6.70 0.000000000020302648 YES
## Three Strikes 0.55 0.33 0.22 5.06 0.000000411416142931 YES
## LWOP 0.47 0.33 0.15 3.27 0.001082488916676283 YES
## Death Penalty 0.56 0.24 0.32 7.23 0.000000000000498490 YES
## Sentence 0.25 0.17 0.08 1.60 0.110653580517522609 NO
# Count of measures for which the directional hypothesis reached significance.
cat("\n\nSummary: H2 supported for", sum(steiger_results$Supported == "YES"),
"of", nrow(steiger_results), "punitiveness measures\n")
##
##
## Summary: H2 supported for 5 of 6 punitiveness measures
cat("=== BOOTSTRAP CIs FOR H2 (Steiger's Z) ===\n\n")
## === BOOTSTRAP CIs FOR H2 (Steiger's Z) ===
# Function to compute difference in correlations for bootstrapping
# Bootstrap statistic: difference between the punitiveness-hostility and
# punitiveness-crime-concern correlations in a resampled data set.
#
# Args:
#   data: data frame with punitiveness_agg, hostile_agg, crime_concerns_agg.
#   indices: row indices supplied by boot() for the current replicate.
# Returns: numeric scalar, r(pun, hostile) - r(pun, crime).
cor_diff_function <- function(data, indices) {
  resampled <- data[indices, ]
  r_with_hostile <- cor(resampled$punitiveness_agg, resampled$hostile_agg,
                        use = "pairwise.complete.obs")
  r_with_crime <- cor(resampled$punitiveness_agg, resampled$crime_concerns_agg,
                      use = "pairwise.complete.obs")
  r_with_hostile - r_with_crime
}
set.seed(42) # For reproducibility
# Prepare data
# Listwise-complete rows on the three variables used by cor_diff_function().
boot_data <- df_clean %>%
select(punitiveness_agg, hostile_agg, crime_concerns_agg) %>%
na.omit()
cat("Bootstrap sample size: N =", nrow(boot_data), "\n")
## Bootstrap sample size: N = 496
cat("Running 10,000 bootstrap iterations...\n\n")
## Running 10,000 bootstrap iterations...
# Run bootstrap
# Each replicate resamples rows with replacement and recomputes the
# correlation difference.
boot_results <- boot(
data = boot_data,
statistic = cor_diff_function,
R = 10000
)
# Display results
cat("Bootstrap Results:\n")
## Bootstrap Results:
cat("Original difference (r_Hostile - r_Crime):", round(boot_results$t0, 3), "\n")
## Original difference (r_Hostile - r_Crime): 0.264
cat("Bootstrap mean:", round(mean(boot_results$t), 3), "\n")
## Bootstrap mean: 0.264
cat("Bootstrap SE:", round(sd(boot_results$t), 3), "\n\n")
## Bootstrap SE: 0.049
# Confidence intervals
# boot.ci() component layout: percent/bca store lower/upper at [4]/[5];
# normal stores them at [2]/[3].
boot_ci <- boot.ci(boot_results, type = c("perc", "bca", "norm"))
cat("95% Confidence Intervals:\n")
## 95% Confidence Intervals:
cat(" Percentile method:", round(boot_ci$percent[4], 3), "to",
round(boot_ci$percent[5], 3), "\n")
## Percentile method: 0.169 to 0.361
cat(" BCa method:", round(boot_ci$bca[4], 3), "to",
round(boot_ci$bca[5], 3), "\n")
## BCa method: 0.174 to 0.367
cat(" Normal method:", round(boot_ci$normal[2], 3), "to",
round(boot_ci$normal[3], 3), "\n\n")
## Normal method: 0.169 to 0.361
# Does CI exclude zero?
ci_lower <- boot_ci$bca[4]
ci_upper <- boot_ci$bca[5]
# Scalar condition: use short-circuiting || rather than element-wise |
# (since R 4.3, || errors on vectors anyway, so this is also future-proof).
excludes_zero <- ci_lower > 0 || ci_upper < 0
cat("BCa 95% CI excludes zero:", ifelse(excludes_zero, "YES", "NO"), "\n")
## BCa 95% CI excludes zero: YES
cat("Interpretation: The difference is",
    ifelse(excludes_zero, "statistically significant", "not significant"),
    "at alpha = .05\n\n")
## Interpretation: The difference is statistically significant at alpha = .05
# Histogram of bootstrap distribution
png("bootstrap_h2_distribution.png", width = 800, height = 600, res = 150)
# Data-driven x-limits that always include zero and the observed value, so the
# plot is never clipped (the previous hard-coded c(0, 0.6) would truncate any
# replicate outside that window).
hist(boot_results$t, breaks = 50, col = "steelblue", border = "white",
     main = "Bootstrap Distribution of r(Hostile) - r(Crime)",
     xlab = "Difference in Correlations",
     xlim = range(c(0, boot_results$t, boot_results$t0)))
abline(v = boot_results$t0, col = "red", lwd = 2, lty = 1)       # observed diff
abline(v = boot_ci$bca[4], col = "darkgreen", lwd = 2, lty = 2)  # CI lower
abline(v = boot_ci$bca[5], col = "darkgreen", lwd = 2, lty = 2)  # CI upper
abline(v = 0, col = "black", lwd = 2, lty = 3)                   # null value
legend("topright",
       legend = c("Observed", "95% BCa CI", "Zero"),
       col = c("red", "darkgreen", "black"),
       lty = c(1, 2, 3), lwd = 2)
dev.off()
## quartz_off_screen
## 2
cat("Saved: bootstrap_h2_distribution.png\n\n")
## Saved: bootstrap_h2_distribution.png
cat("=== BOOTSTRAP CIs FOR ALL CLUSTERS VS CRIME CONCERNS ===\n\n")
## === BOOTSTRAP CIs FOR ALL CLUSTERS VS CRIME CONCERNS ===
# Function factory for different clusters
# Factory producing a boot()-compatible statistic for any correlate cluster:
# the returned closure computes r(pun, cluster) - r(pun, crime_concerns) on a
# resample.
#
# Args:
#   cluster_var: column name (string) of the cluster aggregate to compare.
# Returns: function(data, indices) -> numeric scalar.
make_cor_diff <- function(cluster_var) {
  function(data, indices) {
    resampled <- data[indices, ]
    cluster_r <- cor(resampled$punitiveness_agg, resampled[[cluster_var]],
                     use = "pairwise.complete.obs")
    baseline_r <- cor(resampled$punitiveness_agg, resampled$crime_concerns_agg,
                      use = "pairwise.complete.obs")
    cluster_r - baseline_r
  }
}
clusters_to_test <- c("hostile_agg", "emotions_agg", "personality_agg")
cluster_labels <- c("Hostile Aggression", "Emotions", "Personality")
# Accumulate one row per cluster in a preallocated list and bind once at the
# end; rbind-ing onto a growing data.frame inside the loop copies the whole
# table on every iteration.
boot_rows <- vector("list", length(clusters_to_test))
for (i in seq_along(clusters_to_test)) {
  # Listwise-complete rows for this cluster's trio of variables.
  vars_needed <- c("punitiveness_agg", clusters_to_test[i], "crime_concerns_agg")
  boot_data_i <- df_clean %>% select(all_of(vars_needed)) %>% na.omit()
  # Bootstrap the correlation difference for this cluster.
  boot_i <- boot(
    data = boot_data_i,
    statistic = make_cor_diff(clusters_to_test[i]),
    R = 5000
  )
  boot_ci_i <- boot.ci(boot_i, type = "bca")
  boot_rows[[i]] <- data.frame(
    Cluster = cluster_labels[i],
    Observed_Diff = round(boot_i$t0, 3),
    Boot_Mean = round(mean(boot_i$t), 3),
    Boot_SE = round(sd(boot_i$t), 3),
    CI_Lower = round(boot_ci_i$bca[4], 3),
    CI_Upper = round(boot_ci_i$bca[5], 3),
    # Scalar condition: short-circuit || is the correct operator here.
    Excludes_Zero = ifelse(boot_ci_i$bca[4] > 0 || boot_ci_i$bca[5] < 0, "YES", "NO"),
    stringsAsFactors = FALSE
  )
}
boot_summary <- do.call(rbind, boot_rows)
# Print the per-cluster bootstrap summary; the CI columns are BCa bounds.
cat("Bootstrap 95% BCa CIs for (Cluster - Crime Concerns) Difference:\n\n")
## Bootstrap 95% BCa CIs for (Cluster - Crime Concerns) Difference:
print(boot_summary, row.names = FALSE)
## Cluster Observed_Diff Boot_Mean Boot_SE CI_Lower CI_Upper
## Hostile Aggression 0.264 0.263 0.049 0.177 0.369
## Emotions 0.213 0.212 0.053 0.115 0.321
## Personality 0.153 0.153 0.051 0.057 0.259
## Excludes_Zero
## YES
## YES
## YES
cat("\n")
cat("=== POLITICAL ORIENTATION AS MODERATOR ===\n\n")
## === POLITICAL ORIENTATION AS MODERATOR ===
# Verify political groups exist
# Derive the grouping only if the main-analysis file did not already supply it.
# NOTE(review): this coding assumes politid is a 7-point scale (1-3 liberal,
# 4 moderate, 5-7 conservative) - confirm against the main-analysis codebook.
if(!"political_group" %in% names(df_clean)) {
df_clean <- df_clean %>%
mutate(political_group = case_when(
politid <= 3 ~ "Liberal",
politid == 4 ~ "Moderate",
politid >= 5 ~ "Conservative"
))
}
# Distribution
cat("Political Group Distribution:\n")
## Political Group Distribution:
table(df_clean$political_group, useNA = "ifany")
##
## Conservative Liberal Moderate
## 190 190 116
cat("\n\n")
# Center political ID for interaction tests
# scale(scale = FALSE) mean-centers only; [,1] drops the matrix attributes
# scale() attaches so the column stays a plain numeric vector.
df_clean$politid_c <- scale(df_clean$politid, scale = FALSE)[,1]
cat("=== INTERACTION TESTS: Political Orientation x Correlate ===\n\n")
## === INTERACTION TESTS: Political Orientation x Correlate ===
# Key correlates to test
correlates_to_test <- c(
  "hostile_agg", "crime_concerns_agg", "emotions_agg", "personality_agg",
  "rwa_comp", "sdo_comp", "raceresent_comp", "hatred_comp"
)
correlate_names <- c(
  "Hostile Aggression", "Crime Concerns", "Emotions", "Personality",
  "RWA", "SDO", "Racial Resentment", "Hatred"
)
# One moderation model per correlate:
#   punitiveness_agg ~ correlate(z) * politid(centered)
# Rows are collected in a preallocated list and bound once, avoiding the
# quadratic grow-by-rbind pattern.
interaction_rows <- vector("list", length(correlates_to_test))
for (i in seq_along(correlates_to_test)) {
  correlate_var <- correlates_to_test[i]
  # Standardize correlate for interpretability
  df_clean$correlate_z <- scale(df_clean[[correlate_var]])[,1]
  # Run interaction model
  model <- lm(punitiveness_agg ~ correlate_z * politid_c, data = df_clean)
  coefs <- coef(summary(model))
  interaction_rows[[i]] <- data.frame(
    Correlate = correlate_names[i],
    Main_Effect_b = round(coefs["correlate_z", "Estimate"], 3),
    Main_Effect_p = coefs["correlate_z", "Pr(>|t|)"],
    Political_b = round(coefs["politid_c", "Estimate"], 3),
    Political_p = coefs["politid_c", "Pr(>|t|)"],
    Interaction_b = round(coefs["correlate_z:politid_c", "Estimate"], 3),
    Interaction_p = coefs["correlate_z:politid_c", "Pr(>|t|)"],
    Interaction_Sig = ifelse(coefs["correlate_z:politid_c", "Pr(>|t|)"] < .05, "*", ""),
    stringsAsFactors = FALSE
  )
}
interaction_results <- do.call(rbind, interaction_rows)
# Format p-values
interaction_results$Main_Effect_p <- format(interaction_results$Main_Effect_p,
                                            scientific = FALSE, digits = 3)
interaction_results$Political_p <- format(interaction_results$Political_p,
                                          scientific = FALSE, digits = 3)
interaction_results$Interaction_p <- format(interaction_results$Interaction_p,
                                            scientific = FALSE, digits = 3)
cat("Interaction Test Results:\n")
## Interaction Test Results:
cat("Model: Punitiveness ~ Correlate(z) * Political_ID(centered)\n\n")
## Model: Punitiveness ~ Correlate(z) * Political_ID(centered)
# The wide table wraps across two printed segments below.
print(as.data.frame(interaction_results), row.names = FALSE)
## Correlate Main_Effect_b Main_Effect_p
## Hostile Aggression 0.433 0.0000000000000000000000000000000000000589
## Crime Concerns 0.213 0.0000000005221353291445955697327361491459
## Emotions 0.382 0.0000000000000000000000000000087238951105
## Personality 0.324 0.0000000000000005465970616239029940629697
## RWA 0.350 0.0000000000000000110878210021776622749166
## SDO 0.171 0.0000098047700458965083468920384546230196
## Racial Resentment 0.309 0.0000000000000066583742784677135982772151
## Hatred 0.382 0.0000000000000000000000000000027732975274
## Political_b Political_p Interaction_b Interaction_p Interaction_Sig
## 0.081 0.0000010908894116 -0.026 0.074
## 0.137 0.0000000000000571 -0.024 0.174
## 0.096 0.0000000170314826 -0.009 0.565
## 0.061 0.0031261058211971 -0.008 0.616
## 0.050 0.0175255121396556 -0.023 0.186
## 0.119 0.0000000032088381 0.000 0.991
## 0.069 0.0008019148318104 0.005 0.765
## 0.102 0.0000000014094963 -0.012 0.445
cat("\n* = Significant interaction (p < .05)\n")
##
## * = Significant interaction (p < .05)
# Sign convention follows the grouping above, where higher politid values
# are coded Conservative.
cat("Positive interaction = stronger correlate-punitiveness relationship for conservatives\n")
## Positive interaction = stronger correlate-punitiveness relationship for conservatives
cat("Negative interaction = stronger correlate-punitiveness relationship for liberals\n\n")
## Negative interaction = stronger correlate-punitiveness relationship for liberals
# Find significant interactions
sig_interactions <- interaction_results %>%
filter(Interaction_Sig == "*") %>%
pull(Correlate)
if(length(sig_interactions) > 0) {
cat("=== PLOTTING SIGNIFICANT INTERACTIONS ===\n\n")
# One PNG per significant interaction; file name derived from the label.
for(correlate_name in sig_interactions) {
# Map the display label back to its column name.
correlate_var <- correlates_to_test[which(correlate_names == correlate_name)]
# Standardize for plotting
df_clean$correlate_z <- scale(df_clean[[correlate_var]])[,1]
# Fit model
model <- lm(punitiveness_agg ~ correlate_z * politid_c, data = df_clean)
# Create interaction plot
png(paste0("interaction_", gsub(" ", "_", correlate_name), ".png"),
width = 800, height = 600, res = 150)
# Simple slopes plot
# modx.values are fixed offsets (-2/0/+2) on the centered political ID scale,
# not SD units.
interactions::interact_plot(
model,
pred = correlate_z,
modx = politid_c,
modx.values = c(-2, 0, 2), # Liberal (-2), Moderate (0), Conservative (+2)
modx.labels = c("Liberal (-2)", "Moderate (0)", "Conservative (+2)"),
main.title = paste("Political Orientation x", correlate_name),
x.label = paste(correlate_name, "(standardized)"),
y.label = "Punitiveness",
colors = c("blue", "gray50", "red")
)
dev.off()
cat("Saved: interaction_", gsub(" ", "_", correlate_name), ".png\n", sep = "")
}
} else {
cat("No significant interactions to plot.\n")
}
if(length(sig_interactions) > 0) {
cat("=== SIMPLE SLOPES ANALYSIS ===\n\n")
for(correlate_name in sig_interactions) {
# Map the display label back to its column name.
correlate_var <- correlates_to_test[which(correlate_names == correlate_name)]
# Standardize
df_clean$correlate_z <- scale(df_clean[[correlate_var]])[,1]
# Fit model
model <- lm(punitiveness_agg ~ correlate_z * politid_c, data = df_clean)
cat(paste0("--- ", correlate_name, " ---\n"))
# Simple slopes at fixed offsets of -1.5, 0, and +1.5 points on centered
# political orientation (not +/-1 SD, as a previous comment claimed).
ss <- interactions::sim_slopes(model, pred = correlate_z, modx = politid_c,
modx.values = c(-1.5, 0, 1.5))
print(ss)
cat("\n")
}
}
cat("=== CORRELATIONS BY POLITICAL GROUP ===\n\n")
## === CORRELATIONS BY POLITICAL GROUP ===
# Compute correlations for each political group
groups <- c("Liberal", "Moderate", "Conservative")
group_cors <- data.frame(
  Correlate = correlate_names,
  stringsAsFactors = FALSE
)
# Helper: r between punitiveness and each correlate within one data set.
# vapply() (rather than sapply()) guarantees a numeric vector even for
# degenerate input, and removes the duplicated per-group/overall code.
cor_with_punitiveness <- function(dat) {
  vapply(correlates_to_test, function(var) {
    cor(dat$punitiveness_agg, dat[[var]], use = "pairwise.complete.obs")
  }, numeric(1))
}
for (grp in groups) {
  subset_data <- df_clean %>% filter(political_group == grp)
  group_cors[[grp]] <- round(cor_with_punitiveness(subset_data), 2)
}
# Add overall
group_cors$Overall <- round(cor_with_punitiveness(df_clean), 2)
# Add range ("Range" is a signed Conservative-minus-Liberal difference)
group_cors$Range <- group_cors$Conservative - group_cors$Liberal
print(as.data.frame(group_cors), row.names = FALSE)
## Correlate Liberal Moderate Conservative Overall Range
## Hostile Aggression 0.57 0.47 0.56 0.59 -0.01
## Crime Concerns 0.35 0.31 0.20 0.32 -0.15
## Emotions 0.53 0.40 0.54 0.53 0.01
## Personality 0.41 0.34 0.39 0.47 -0.02
## RWA 0.46 0.43 0.37 0.50 -0.09
## SDO 0.22 0.13 0.29 0.32 0.07
## Racial Resentment 0.38 0.29 0.43 0.46 0.05
## Hatred 0.51 0.45 0.53 0.53 0.02
cat("\nPositive Range = stronger relationship for conservatives\n")
##
## Positive Range = stronger relationship for conservatives
cat("Negative Range = stronger relationship for liberals\n\n")
##
## Negative Range = stronger relationship for liberals
cat("=== SAVING SUPPLEMENTARY OUTPUTS ===\n\n")
## === SAVING SUPPLEMENTARY OUTPUTS ===
# Save punitiveness measure correlations
# Row names (the measure labels) are written as the first CSV column.
write.csv(as.data.frame(cor_matrix), "punitiveness_measures_correlations.csv")
cat("Saved: punitiveness_measures_correlations.csv\n")
## Saved: punitiveness_measures_correlations.csv
# Save Steiger comparisons
write.csv(steiger_results, "steiger_by_punitiveness_measure.csv", row.names = FALSE)
cat("Saved: steiger_by_punitiveness_measure.csv\n")
## Saved: steiger_by_punitiveness_measure.csv
# Save bootstrap results
write.csv(boot_summary, "bootstrap_ci_results.csv", row.names = FALSE)
cat("Saved: bootstrap_ci_results.csv\n")
## Saved: bootstrap_ci_results.csv
# Save interaction results
write.csv(interaction_results, "political_interaction_tests.csv", row.names = FALSE)
cat("Saved: political_interaction_tests.csv\n")
## Saved: political_interaction_tests.csv
# Save group correlations
write.csv(group_cors, "correlations_by_political_group.csv", row.names = FALSE)
cat("Saved: correlations_by_political_group.csv\n\n")
## Saved: correlations_by_political_group.csv
# Closing summary banner recapping the three supplementary analyses.
cat("\n", paste(rep("=", 70), collapse = ""), "\n")
##
## ======================================================================
cat("SUPPLEMENTARY ANALYSES SUMMARY\n")
## SUPPLEMENTARY ANALYSES SUMMARY
cat(paste(rep("=", 70), collapse = ""), "\n\n")
## ======================================================================
cat("1. PUNITIVENESS MEASURE COMPARISONS:\n")
## 1. PUNITIVENESS MEASURE COMPARISONS:
cat(" - H2 (Hostile > Crime) holds for", sum(steiger_results$Supported == "YES"),
"/", nrow(steiger_results), "punitiveness operationalizations\n")
## - H2 (Hostile > Crime) holds for 5 / 6 punitiveness operationalizations
cat(" - Parsimony shows weakest correlations (consistent with low reliability)\n\n")
## - Parsimony shows weakest correlations (consistent with low reliability)
cat("2. BOOTSTRAP CIs FOR H2:\n")
## 2. BOOTSTRAP CIs FOR H2:
cat(" - Difference (Hostile - Crime):", round(boot_results$t0, 3), "\n")
## - Difference (Hostile - Crime): 0.264
cat(" - 95% BCa CI: [", round(boot_ci$bca[4], 3), ",", round(boot_ci$bca[5], 3), "]\n")
## - 95% BCa CI: [ 0.174 , 0.367 ]
cat(" - CI excludes zero:", ifelse(excludes_zero, "YES (significant)", "NO"), "\n\n")
## - CI excludes zero: YES (significant)
cat("3. POLITICAL MODERATION:\n")
## 3. POLITICAL MODERATION:
n_sig <- sum(interaction_results$Interaction_Sig == "*")
cat(" - Significant interactions:", n_sig, "/", nrow(interaction_results), "\n")
## - Significant interactions: 0 / 8
# Only listed when at least one interaction reached significance.
if(n_sig > 0) {
cat(" - Moderated correlates:", paste(sig_interactions, collapse = ", "), "\n")
}
cat("\n")
cat(paste(rep("=", 70), collapse = ""), "\n")
## ======================================================================
cat("SUPPLEMENTARY ANALYSES COMPLETE\n")
## SUPPLEMENTARY ANALYSES COMPLETE
cat(paste(rep("=", 70), collapse = ""), "\n")
## ======================================================================