The Parsimony composite showed unacceptably low reliability (alpha = .48). This analysis examines the two items separately to understand why the composite fails and how each item behaves on its own.
The two items are analyzed after reverse coding, so that higher scores indicate more punitive attitudes.
# --- Setup ----------------------------------------------------------------
# tidyverse: data wrangling/pipes; psych: psychometrics utilities;
# cocor: tests comparing dependent correlations (Steiger's Z);
# knitr: table rendering.
library(tidyverse)
library(psych)
library(cocor)
library(knitr)
# Suppress scientific notation and shorten printed precision for readability
options(scipen = 999)
options(digits = 4)
# Load the cleaned data
# (produced by an earlier cleaning step; string columns stay character)
df_clean <- read.csv("punishment_212_cleaned_data.csv", stringsAsFactors = FALSE)
cat("Loaded N =", nrow(df_clean), "\n\n")
## Loaded N = 496
# --- Item-level descriptives ----------------------------------------------
cat("=== PARSIMONY ITEM DESCRIPTIVES ===\n\n")
## === PARSIMONY ITEM DESCRIPTIVES ===
# Note: These are the REVERSED items (higher = more punitive)
cat("parsimony_1_R: 'Punishments should be as light as possible' (reversed)\n")
## parsimony_1_R: 'Punishments should be as light as possible' (reversed)
cat(" Interpreting high scores: Rejecting absolute minimization of punishment\n\n")
## Interpreting high scores: Rejecting absolute minimization of punishment
cat("parsimony_2_R: 'State should not punish more than necessary' (reversed)\n")
## parsimony_2_R: 'State should not punish more than necessary' (reversed)
cat(" Interpreting high scores: Accepting excessive/disproportionate punishment\n\n")
## Interpreting high scores: Accepting excessive/disproportionate punishment
# Descriptives: Mean/SD/Median for each item and the composite, reshaped to
# one row per variable. across() produces columns named like
# "parsimony_1_R_Mean"; the separate() regex "_(?=[^_]+$)" splits only on the
# LAST underscore so variable names containing underscores stay intact.
desc_table <- df_clean %>%
summarise(
across(c(parsimony_1_R, parsimony_2_R, parsimony_comp),
list(
Mean = ~mean(., na.rm = TRUE),
SD = ~sd(., na.rm = TRUE),
Median = ~median(., na.rm = TRUE)
))
) %>%
pivot_longer(everything()) %>%
separate(name, into = c("Variable", "Stat"), sep = "_(?=[^_]+$)") %>%
pivot_wider(names_from = Stat, values_from = value)
print(as.data.frame(desc_table), row.names = FALSE)
## Variable Mean SD Median
## parsimony_1_R 5.417 1.553 6
## parsimony_2_R 2.996 1.667 3
## parsimony_comp 4.207 1.306 4
cat("\n")
# Distribution comparison: raw frequency tables. Item 1 piles up at the top
# of the scale (most respondents reject 'as light as possible') while item 2
# piles up at the bottom — a first hint the items behave differently.
cat("Distribution of Responses (1-7 scale):\n\n")
## Distribution of Responses (1-7 scale):
cat("parsimony_1_R:\n")
## parsimony_1_R:
print(table(df_clean$parsimony_1_R))
##
## 1 2 3 4 5 6 7
## 12 21 28 60 88 140 147
cat("\nparsimony_2_R:\n")
##
## parsimony_2_R:
print(table(df_clean$parsimony_2_R))
##
## 1 2 3 4 5 6 7
## 108 107 116 84 33 21 27
# --- Cross-tabulation of the two items ------------------------------------
cat("\n=== CROSS-TABULATION OF ITEMS ===\n\n")
##
## === CROSS-TABULATION OF ITEMS ===
# How do people respond to both items?
crosstab <- table(df_clean$parsimony_1_R, df_clean$parsimony_2_R)
cat("Rows = parsimony_1_R (light as possible), Columns = parsimony_2_R (not more than necessary)\n\n")
## Rows = parsimony_1_R (light as possible), Columns = parsimony_2_R (not more than necessary)
print(crosstab)
##
## 1 2 3 4 5 6 7
## 1 10 0 1 0 0 0 1
## 2 9 6 1 5 0 0 0
## 3 9 7 5 4 2 1 0
## 4 15 15 15 11 4 0 0
## 5 21 22 30 9 5 1 0
## 6 18 36 40 27 10 8 1
## 7 26 21 24 28 12 11 25
# Correlation between items (pairwise deletion guards against missing data)
item_cor <- cor(df_clean$parsimony_1_R, df_clean$parsimony_2_R, use = "pairwise.complete.obs")
cat(sprintf("\nCorrelation between items: r = %.2f\n", item_cor))
##
## Correlation between items: r = 0.31
# With only 2 items, Spearman-Brown gives alpha = 2r/(1+r) ≈ .47 at r = .31,
# which matches the reported composite reliability of .48.
cat("(This explains the low alpha - items only moderately correlated)\n")
## (This explains the low alpha - items only moderately correlated)
# --- Mean comparison of the two items -------------------------------------
cat("\n=== MEAN COMPARISON ===\n\n")
##
## === MEAN COMPARISON ===

# Format a p-value for display. Previously this section printed "p = %.4f",
# which renders any p below .00005 as the misleading "p = 0.0000"; report
# an inequality instead when the value is not representable at 4 decimals.
fmt_p4 <- function(p) {
  if (p < .0001) "p < .0001" else sprintf("p = %.4f", p)
}

# Paired t-test: are the two reversed items endorsed to different degrees?
t_result <- t.test(df_clean$parsimony_1_R, df_clean$parsimony_2_R, paired = TRUE)
cat(sprintf("parsimony_1_R Mean: %.2f (SD = %.2f)\n",
            mean(df_clean$parsimony_1_R, na.rm = TRUE),
            sd(df_clean$parsimony_1_R, na.rm = TRUE)))
## parsimony_1_R Mean: 5.42 (SD = 1.55)
cat(sprintf("parsimony_2_R Mean: %.2f (SD = %.2f)\n",
            mean(df_clean$parsimony_2_R, na.rm = TRUE),
            sd(df_clean$parsimony_2_R, na.rm = TRUE)))
## parsimony_2_R Mean: 3.00 (SD = 1.67)
cat(sprintf("\nPaired t-test: t(%d) = %.2f, %s\n",
            t_result$parameter, t_result$statistic, fmt_p4(t_result$p.value)))
##
## Paired t-test: t(495) = 28.56, p < .0001

# Effect size: Cohen's d for paired data (d_z = mean difference / SD of
# the paired differences)
mean_diff <- mean(df_clean$parsimony_1_R - df_clean$parsimony_2_R, na.rm = TRUE)
sd_diff <- sd(df_clean$parsimony_1_R - df_clean$parsimony_2_R, na.rm = TRUE)
d <- mean_diff / sd_diff
cat(sprintf("Cohen's d = %.2f\n", d))
## Cohen's d = 1.28

cat("\nInterpretation:\n")
##
## Interpretation:
# Direction of the difference drives which narrative is printed
if (mean(df_clean$parsimony_1_R, na.rm = TRUE) > mean(df_clean$parsimony_2_R, na.rm = TRUE)) {
  cat("People are MORE willing to reject 'light as possible' than 'not more than necessary'\n")
  cat("This makes sense: rejecting proportionality is more extreme than rejecting minimization\n")
} else {
  cat("People are LESS willing to reject 'light as possible' than 'not more than necessary'\n")
}
## People are MORE willing to reject 'light as possible' than 'not more than necessary'
## This makes sense: rejecting proportionality is more extreme than rejecting minimization
# --- Correlations with punitiveness (parsimony excluded from the DV) ------
cat("=== CORRELATIONS WITH PUNITIVENESS ===\n\n")
## === CORRELATIONS WITH PUNITIVENESS ===

# Correlate each item with the OTHER punitiveness measures, excluding
# parsimony from the aggregate so an item is never correlated with a
# composite that contains itself.

# Punitiveness aggregate without the parsimony component
df_clean$punitiveness_no_parsimony <- rowMeans(
  df_clean[, c("punishmore_comp", "threestrikes_comp", "LWOP", "deathpenalty")],
  na.rm = TRUE
)
df_clean$punitiveness_no_parsimony_z <- scale(df_clean$punitiveness_no_parsimony)[, 1]

# Also fold in Sentence_z (z-scored, so average the two standardized parts)
df_clean$punitiveness_no_pars_full <- rowMeans(
  cbind(df_clean$punitiveness_no_parsimony_z, df_clean$Sentence_z),
  na.rm = TRUE
)

cat("Correlations with Punitiveness (excluding parsimony from DV):\n\n")
## Correlations with Punitiveness (excluding parsimony from DV):
cor1 <- cor.test(df_clean$parsimony_1_R, df_clean$punitiveness_no_pars_full)
cor2 <- cor.test(df_clean$parsimony_2_R, df_clean$punitiveness_no_pars_full)

# Report the observed p-value. The previous version hardcoded "p < .001"
# into the output string without checking the actual test result.
fmt_p3 <- function(p) if (p < .001) "p < .001" else sprintf("p = %.3f", p)
cat(sprintf("parsimony_1_R ('light as possible' R): r = %.3f, %s\n",
            cor1$estimate, fmt_p3(cor1$p.value)))
## parsimony_1_R ('light as possible' R): r = 0.424, p < .001
cat(sprintf("parsimony_2_R ('not more than necessary' R): r = %.3f, %s\n",
            cor2$estimate, fmt_p3(cor2$p.value)))
## parsimony_2_R ('not more than necessary' R): r = 0.417, p < .001

# Steiger's Z for two dependent correlations sharing punitiveness as the
# common variable; N is the complete-case count over all three variables.
r12 <- cor(df_clean$parsimony_1_R, df_clean$parsimony_2_R, use = "pairwise.complete.obs")
n <- sum(complete.cases(df_clean[, c("parsimony_1_R", "parsimony_2_R", "punitiveness_no_pars_full")]))
steiger <- cocor.dep.groups.overlap(
  r.jk = cor1$estimate,
  r.jh = cor2$estimate,
  r.kh = r12,
  n = n,
  test = "steiger1980"
)
cat(sprintf("\nSteiger's Z comparing item correlations: Z = %.2f, p = %.4f\n",
            steiger@steiger1980$statistic, steiger@steiger1980$p.value))
##
## Steiger's Z comparing item correlations: Z = 0.16, p = 0.8739
cat("Interpretation: ",
    ifelse(steiger@steiger1980$p.value < .05,
           "Items correlate DIFFERENTLY with punitiveness",
           "Items correlate SIMILARLY with punitiveness"), "\n")
## Interpretation: Items correlate SIMILARLY with punitiveness
# --- Correlations with each punitiveness component ------------------------
cat("\n=== CORRELATIONS WITH PUNITIVENESS COMPONENTS ===\n\n")
##
## === CORRELATIONS WITH PUNITIVENESS COMPONENTS ===

# Parallel vectors: variable names and their display labels must stay in
# the same order.
punitiveness_components <- c("punishmore_comp", "threestrikes_comp", "LWOP",
                             "deathpenalty", "Sentence_z")
component_labels <- c("Punish More", "Three Strikes", "LWOP", "Death Penalty", "Sentence")

# One row per component, one column per item. vapply (rather than sapply)
# guarantees a numeric(1)-per-element result regardless of input.
cor_table <- data.frame(Component = component_labels)
cor_table$parsimony_1_R <- vapply(punitiveness_components, function(var) {
  round(cor(df_clean$parsimony_1_R, df_clean[[var]], use = "pairwise.complete.obs"), 2)
}, numeric(1))
cor_table$parsimony_2_R <- vapply(punitiveness_components, function(var) {
  round(cor(df_clean$parsimony_2_R, df_clean[[var]], use = "pairwise.complete.obs"), 2)
}, numeric(1))
cor_table$Difference <- cor_table$parsimony_1_R - cor_table$parsimony_2_R
print(as.data.frame(cor_table), row.names = FALSE)
## Component parsimony_1_R parsimony_2_R Difference
## Punish More 0.48 0.53 -0.05
## Three Strikes 0.21 0.42 -0.21
## LWOP 0.32 0.28 0.04
## Death Penalty 0.36 0.36 0.00
## Sentence 0.26 0.18 0.08
cat("\nPositive Difference = parsimony_1_R correlates more strongly\n")
##
## Positive Difference = parsimony_1_R correlates more strongly
cat("Negative Difference = parsimony_2_R correlates more strongly\n")
## Negative Difference = parsimony_2_R correlates more strongly
# --- Correlations with the four correlate clusters ------------------------
cat("=== CORRELATIONS WITH CORRELATE CLUSTERS ===\n\n")
## === CORRELATIONS WITH CORRELATE CLUSTERS ===

cluster_vars <- c("crime_concerns_agg", "emotions_agg", "hostile_agg", "personality_agg")
cluster_labels <- c("Crime Concerns", "Emotions", "Hostile Aggression", "Personality")

# Correlate one column with every cluster aggregate, rounded to 2 dp.
# vapply (rather than sapply) guarantees a numeric result for any input.
cluster_cors <- function(col) {
  vapply(cluster_vars, function(var) {
    round(cor(col, df_clean[[var]], use = "pairwise.complete.obs"), 2)
  }, numeric(1))
}

# Table: one row per cluster; columns for each item plus the composite
cluster_cor_table <- data.frame(Cluster = cluster_labels)
cluster_cor_table$parsimony_1_R <- cluster_cors(df_clean$parsimony_1_R)
cluster_cor_table$parsimony_2_R <- cluster_cors(df_clean$parsimony_2_R)
cluster_cor_table$parsimony_comp <- cluster_cors(df_clean$parsimony_comp)
cluster_cor_table$Difference <- cluster_cor_table$parsimony_1_R - cluster_cor_table$parsimony_2_R
print(as.data.frame(cluster_cor_table), row.names = FALSE)
## Cluster parsimony_1_R parsimony_2_R parsimony_comp Difference
## Crime Concerns 0.11 0.19 0.18 -0.08
## Emotions 0.33 0.34 0.41 -0.01
## Hostile Aggression 0.35 0.45 0.49 -0.10
## Personality 0.20 0.43 0.39 -0.23
# --- Correlations with individual constructs ------------------------------
cat("\n=== CORRELATIONS WITH INDIVIDUAL CONSTRUCTS ===\n\n")
##
## === CORRELATIONS WITH INDIVIDUAL CONSTRUCTS ===

# Parallel vectors of variable names and display labels; order must match.
construct_vars <- c(
  "crime_rates_comp", "fear_comp",
  "hatred_comp", "anger_comp",
  "exclusion_comp", "degradation_comp", "suffering_comp",
  "prisonvi_comp", "harsh_comp", "revenge_comp",
  "rwa_comp", "sdo_comp", "venge_comp", "vprone_comp",
  "raceresent_comp", "bloodsports_comp"
)
construct_labels <- c(
  "Crime Rates", "Fear",
  "Hatred", "Anger",
  "Exclusion", "Degradation", "Suffering",
  "Prison Violence", "Harsh Conditions", "Revenge",
  "RWA", "SDO", "Vengefulness", "Violence Prone",
  "Racial Resentment", "Blood Sports"
)
# Guard against the two parallel vectors drifting out of sync when edited
stopifnot(length(construct_vars) == length(construct_labels))

# Per-item correlation with each construct. vapply (rather than sapply)
# guarantees a numeric(1)-per-element result regardless of input.
construct_cor_table <- data.frame(Construct = construct_labels)
construct_cor_table$parsimony_1_R <- vapply(construct_vars, function(var) {
  round(cor(df_clean$parsimony_1_R, df_clean[[var]], use = "pairwise.complete.obs"), 2)
}, numeric(1))
construct_cor_table$parsimony_2_R <- vapply(construct_vars, function(var) {
  round(cor(df_clean$parsimony_2_R, df_clean[[var]], use = "pairwise.complete.obs"), 2)
}, numeric(1))
construct_cor_table$Difference <- construct_cor_table$parsimony_1_R - construct_cor_table$parsimony_2_R

# Sort by difference (item 1 advantage first, item 2 advantage last)
construct_cor_table <- construct_cor_table %>% arrange(desc(Difference))
print(as.data.frame(construct_cor_table), row.names = FALSE)
## Construct parsimony_1_R parsimony_2_R Difference
## Crime Rates 0.24 0.21 0.03
## Revenge 0.25 0.24 0.01
## Anger 0.25 0.25 0.00
## Exclusion 0.34 0.35 -0.01
## Hatred 0.35 0.36 -0.01
## Suffering 0.25 0.33 -0.08
## RWA 0.28 0.37 -0.09
## Vengefulness 0.00 0.10 -0.10
## Racial Resentment 0.29 0.40 -0.11
## Prison Violence 0.28 0.41 -0.13
## Harsh Conditions 0.34 0.48 -0.14
## Fear -0.02 0.12 -0.14
## Degradation 0.24 0.38 -0.14
## Violence Prone 0.20 0.35 -0.15
## SDO 0.15 0.37 -0.22
## Blood Sports -0.12 0.10 -0.22
cat("\nConstructs where items diverge most (|Diff| > .05):\n")
##
## Constructs where items diverge most (|Diff| > .05):
print(construct_cor_table %>% filter(abs(Difference) > 0.05), row.names = FALSE)
## Construct parsimony_1_R parsimony_2_R Difference
## Suffering 0.25 0.33 -0.08
## RWA 0.28 0.37 -0.09
## Vengefulness 0.00 0.10 -0.10
## Racial Resentment 0.29 0.40 -0.11
## Prison Violence 0.28 0.41 -0.13
## Harsh Conditions 0.34 0.48 -0.14
## Fear -0.02 0.12 -0.14
## Degradation 0.24 0.38 -0.14
## Violence Prone 0.20 0.35 -0.15
## SDO 0.15 0.37 -0.22
## Blood Sports -0.12 0.10 -0.22
# --- H2: hostile aggression > crime concerns, per item --------------------
cat("=== H2 (HOSTILE > CRIME) FOR EACH PARSIMONY ITEM ===\n\n")
## === H2 (HOSTILE > CRIME) FOR EACH PARSIMONY ITEM ===

# H2 is tested with Steiger's (1980) Z for two dependent, overlapping
# correlations sharing the parsimony measure as the common variable.

# Correlation between the two predictor aggregates (shared by all tests)
r_hc <- cor(df_clean$hostile_agg, df_clean$crime_concerns_agg, use = "pairwise.complete.obs")

# Complete-case N for the variables entering each Steiger test. The earlier
# version passed nrow(df_clean), which overstates N whenever any of the
# three variables has missing values (the parsimony items themselves appear
# complete here — t-test df = 495 — but the aggregates may not be).
n_for <- function(vars) sum(complete.cases(df_clean[, vars]))

# ---- parsimony_1_R -------------------------------------------------------
r1_hostile <- cor(df_clean$parsimony_1_R, df_clean$hostile_agg, use = "pairwise.complete.obs")
r1_crime <- cor(df_clean$parsimony_1_R, df_clean$crime_concerns_agg, use = "pairwise.complete.obs")
n1 <- n_for(c("parsimony_1_R", "hostile_agg", "crime_concerns_agg"))
steiger1 <- cocor.dep.groups.overlap(r1_hostile, r1_crime, r_hc, n1, test = "steiger1980")
cat("parsimony_1_R ('light as possible' reversed):\n")
## parsimony_1_R ('light as possible' reversed):
cat(sprintf(" r(Hostile) = %.3f, r(Crime) = %.3f\n", r1_hostile, r1_crime))
## r(Hostile) = 0.346, r(Crime) = 0.107
cat(sprintf(" Difference = %.3f, Z = %.2f, p = %.4f\n",
            r1_hostile - r1_crime, steiger1@steiger1980$statistic, steiger1@steiger1980$p.value))
## Difference = 0.240, Z = 4.98, p = 0.0000
cat(sprintf(" H2 Supported: %s\n\n",
            ifelse(r1_hostile > r1_crime & steiger1@steiger1980$p.value < .05, "YES", "NO")))
## H2 Supported: YES

# ---- parsimony_2_R -------------------------------------------------------
r2_hostile <- cor(df_clean$parsimony_2_R, df_clean$hostile_agg, use = "pairwise.complete.obs")
r2_crime <- cor(df_clean$parsimony_2_R, df_clean$crime_concerns_agg, use = "pairwise.complete.obs")
n2 <- n_for(c("parsimony_2_R", "hostile_agg", "crime_concerns_agg"))
steiger2 <- cocor.dep.groups.overlap(r2_hostile, r2_crime, r_hc, n2, test = "steiger1980")
cat("parsimony_2_R ('not more than necessary' reversed):\n")
## parsimony_2_R ('not more than necessary' reversed):
cat(sprintf(" r(Hostile) = %.3f, r(Crime) = %.3f\n", r2_hostile, r2_crime))
## r(Hostile) = 0.445, r(Crime) = 0.189
cat(sprintf(" Difference = %.3f, Z = %.2f, p = %.4f\n",
            r2_hostile - r2_crime, steiger2@steiger1980$statistic, steiger2@steiger1980$p.value))
## Difference = 0.256, Z = 5.53, p = 0.0000
cat(sprintf(" H2 Supported: %s\n\n",
            ifelse(r2_hostile > r2_crime & steiger2@steiger1980$p.value < .05, "YES", "NO")))
## H2 Supported: YES

# ---- Composite (for comparison) ------------------------------------------
rc_hostile <- cor(df_clean$parsimony_comp, df_clean$hostile_agg, use = "pairwise.complete.obs")
rc_crime <- cor(df_clean$parsimony_comp, df_clean$crime_concerns_agg, use = "pairwise.complete.obs")
nc <- n_for(c("parsimony_comp", "hostile_agg", "crime_concerns_agg"))
steigerc <- cocor.dep.groups.overlap(rc_hostile, rc_crime, r_hc, nc, test = "steiger1980")
cat("parsimony_comp (composite - for comparison):\n")
## parsimony_comp (composite - for comparison):
cat(sprintf(" r(Hostile) = %.3f, r(Crime) = %.3f\n", rc_hostile, rc_crime))
## r(Hostile) = 0.490, r(Crime) = 0.184
cat(sprintf(" Difference = %.3f, Z = %.2f, p = %.4f\n",
            rc_hostile - rc_crime, steigerc@steiger1980$statistic, steigerc@steiger1980$p.value))
## Difference = 0.306, Z = 6.70, p = 0.0000
cat(sprintf(" H2 Supported: %s\n",
            ifelse(rc_hostile > rc_crime & steigerc@steiger1980$p.value < .05, "YES", "NO")))
## H2 Supported: YES
# --- Interpretation and recommendations -----------------------------------
cat("\n")
cat(paste(rep("=", 70), collapse = ""), "\n")
## ======================================================================
cat("INTERPRETATION AND RECOMMENDATIONS\n")
## INTERPRETATION AND RECOMMENDATIONS
cat(paste(rep("=", 70), collapse = ""), "\n\n")
## ======================================================================
cat("WHY THE LOW RELIABILITY?\n\n")
## WHY THE LOW RELIABILITY?
cat("The two items measure related but distinct constructs:\n\n")
## The two items measure related but distinct constructs:
cat("parsimony_1_R: 'Punishments should be as light as possible'\n")
## parsimony_1_R: 'Punishments should be as light as possible'
cat(" - This is an ABSOLUTIST position\n")
## - This is an ABSOLUTIST position
cat(" - Rejecting it means wanting SOME level of punishment (most people)\n")
## - Rejecting it means wanting SOME level of punishment (most people)
cat(" - Higher variance, more central tendency\n\n")
## - Higher variance, more central tendency
cat("parsimony_2_R: 'State should not punish more than necessary'\n")
## parsimony_2_R: 'State should not punish more than necessary'
cat(" - This is a PROPORTIONALITY principle\n")
## - This is a PROPORTIONALITY principle
cat(" - Rejecting it means accepting EXCESSIVE punishment\n")
## - Rejecting it means accepting EXCESSIVE punishment
cat(" - This is a more EXTREME position to reject\n\n")
## - This is a more EXTREME position to reject
cat("The items ask fundamentally different questions:\n")
## The items ask fundamentally different questions:
cat(" - Item 1: Do you want ANY punishment? (Most say yes)\n")
## - Item 1: Do you want ANY punishment? (Most say yes)
cat(" - Item 2: Do you want EXCESSIVE punishment? (More variation)\n\n")
## - Item 2: Do you want EXCESSIVE punishment? (More variation)
cat("RECOMMENDATION:\n\n")
## RECOMMENDATION:
# Compare correlations: mean absolute correlation of each item with the two
# cluster aggregates (r1_*/r2_* are computed in the H2 section above).
# NOTE(review): this averages only the hostile and crime-concerns
# correlations, not all psychological factors — the printed wording
# ("psychological factors") is broader than what is actually compared.
mean_r1 <- mean(abs(c(r1_hostile, r1_crime)))
mean_r2 <- mean(abs(c(r2_hostile, r2_crime)))
if(mean_r1 > mean_r2) {
cat("parsimony_1_R shows STRONGER correlations with psychological factors.\n")
cat("Consider using parsimony_1_R alone or reporting items separately.\n")
} else {
cat("parsimony_2_R shows STRONGER correlations with psychological factors.\n")
cat("Consider using parsimony_2_R alone or reporting items separately.\n")
}
## parsimony_2_R shows STRONGER correlations with psychological factors.
## Consider using parsimony_2_R alone or reporting items separately.
cat("\nAlternatively, given low reliability, consider:\n")
##
## Alternatively, given low reliability, consider:
cat(" 1. Dropping parsimony from the punitiveness aggregate\n")
## 1. Dropping parsimony from the punitiveness aggregate
cat(" 2. Reporting results with and without parsimony\n")
## 2. Reporting results with and without parsimony
cat(" 3. Discussing this as a measurement limitation\n")
## 3. Discussing this as a measurement limitation
# --- Save outputs ---------------------------------------------------------
cat("\n=== SAVING OUTPUTS ===\n\n")
##
## === SAVING OUTPUTS ===
# Write each correlation table to CSV; file name maps to its table.
output_tables <- list(
  "parsimony_items_construct_correlations.csv" = construct_cor_table,
  "parsimony_items_cluster_correlations.csv"   = cluster_cor_table
)
for (out_file in names(output_tables)) {
  write.csv(output_tables[[out_file]], out_file, row.names = FALSE)
  cat("Saved: ", out_file, "\n", sep = "")
}
## Saved: parsimony_items_construct_correlations.csv
## Saved: parsimony_items_cluster_correlations.csv
cat("\n")
# Closing banner
rule_line <- strrep("=", 70)
cat(rule_line, "\n")
## ======================================================================
cat("PARSIMONY ANALYSIS COMPLETE\n")
## PARSIMONY ANALYSIS COMPLETE
cat(rule_line, "\n")
## ======================================================================