viewof cea_functions = cell(`
//sum the elements of a list
sum_list(x) = {
reduce(x, 0, {|a, b| a + b})
}
//square root
sqrt(x) = {
pow(x,1/2)
}
//sample from binomial distribution
binomial(n,p) = {
sum_list( List.make(n, bernoulli(p)) )
}
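/* Usage sketch (illustrative numbers of my choosing): the distribution of the
number of successes in 20 independent trials that each succeed with probability 0.1. */
example_binomial = binomial(20, 0.1)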
//Calculate the cost-effectiveness
cost_effectiveness(value,cost) = value/cost
//generate a normal distribution from a 95% CI [lower,upper]
//lower: lower bound of the 95% CI
//upper: upper bound of the 95% CI
normal_from_ci(lower,upper)={
mean = (upper+lower)/2
std = (upper-lower)/(2*1.96)
normal(mean,std)
}
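/* Usage sketch (made-up bounds): a normal whose 95% CI is [0.5, 1.7], i.e.
mean 1.1 and standard deviation (1.7 - 0.5) / (2 * 1.96), about 0.31. */
example_normal_from_ci = normal_from_ci(0.5, 1.7)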
//Construct a log-normal distribution with the expected value ev and the standard deviation standard_deviation
lognormal_from_mean_and_std(ev,standard_deviation) ={
mu = log(ev^2/pow(standard_deviation^2+ev^2,1/2));
sigma = pow( log(standard_deviation^2/ev^2 +1 ),1/2);
lognormal(mu,sigma)
}
log_normal_parameters_from_mean_and_std(ev,standard_deviation)={
mu = log(ev^2/pow(standard_deviation^2+ev^2,1/2))
sigma = pow( log(standard_deviation^2/ev^2 +1 ),1/2)
[mu, sigma]
}
ev_logNormal(mu,sigma)={
exp(mu+1/2*pow(sigma,2))
}
std_logNormal(mu,sigma)={
pow( (exp(pow(sigma,2))-1)*exp(2*mu+pow(sigma,2)) ,1/2)
}
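/* Consistency sketch (illustrative values): converting an expected value and
standard deviation to (mu, sigma) and back should approximately recover them.
With ev = 2 and standard_deviation = 0.5, ev_logNormal applied to the fitted
parameters should return about 2. */
example_params = log_normal_parameters_from_mean_and_std(2, 0.5)
example_recovered_ev = ev_logNormal(example_params[0], example_params[1])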
//This function computes estimates of the parameters mu and sigma of the log-Normal distribution from a series of samples. It handles the common problem that some samples may have negative values by applying the maximum-likelihood estimate to shifted samples and then undoing the shift on the fitted distribution.
fit_logNormal(samples) = {
shift = -min(samples)+0.001
shifted_samples = samples+shift
mu_shifted = mean(log(shifted_samples))
sigma_shifted = pow(variance(log(shifted_samples)),1/2)
ev_unshifted = ev_logNormal(mu_shifted,sigma_shifted)-shift
std_unshifted = std_logNormal(mu_shifted,sigma_shifted)
log_normal_parameters_from_mean_and_std(ev_unshifted,std_unshifted)
}
//Perform Bayesian inference to compute the posterior distribution from the prior distribution and the likelihood function.
posterior(prior,likelihood) = {
normalize(prior.*likelihood)
}
//compute the posterior distribution for a log-normal likelihood with a log-normal prior
posterior_log_normal(mu_prior,sigma_prior,x,sigma_x) = {
tau_prior = pow(sigma_prior,-2)
tau_x = pow(sigma_x,-2)
mu_posterior = (tau_prior*mu_prior + tau_x*log(x)) / (tau_prior+tau_x)
sigma_posterior = pow(tau_prior+tau_x,-1/2)
[mu_posterior,sigma_posterior]
}
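/* Worked sketch (illustrative numbers): with a prior of mu = 0, sigma = 1 and
one observation x = exp(1) (so log(x) = 1) measured with sigma_x = 1, both
precisions equal 1, so the posterior has mu = 0.5 and sigma = 1 / sqrt(2). */
example_posterior_params = posterior_log_normal(0, 1, exp(1), 1)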
//expected value of choosing between two interventions under uncertainty about their cost-effectiveness
value_of_uncertainty(ce_new,previous_best,budget) ={
max([mean(ce_new),mean(previous_best)])*budget
}
//computes the posterior distribution of the cost-effectiveness for a Gaussian likelihood function and an arbitrary prior distribution
posterior_ce(prior,observed_ce,precision)={
likelihood = normal(observed_ce,pow(precision,-1/2))
posterior(prior,likelihood)
}
// computes the value of partial information for a Gaussian likelihood function and an arbitrary prior distribution
value_of_partial_information_gaussian_likelihood(prior_ce_new,observed_ce,precision,budget,previous_best) = {
posterior_ce_new = posterior_ce(prior_ce_new,observed_ce,precision)
budget*(max([mean(posterior_ce_new),mean(previous_best)])-max([mean(prior_ce_new),mean(previous_best)]))
}
// computes the value of partial information assuming that both the prior and the likelihood function are Gaussian
value_of_partial_information_gaussian(prior,prior_precision,observed_ce,precision,budget,previous_best) = {
prior_mean = mean(prior)
posterior_mean = (prior_precision*prior_mean+precision*observed_ce)/(prior_precision+precision)
max_ce_new = max([posterior_mean,mean(previous_best)])
max_ce_prev = max([prior_mean,mean(previous_best)])
(max_ce_new-max_ce_prev) * budget
}
//compute the posterior distribution for a Gaussian prior and an observation with a Gaussian likelihood function. This function uses the conjugate-prior update equation to do so.
posterior_normal(prior,x,sigma_x)={
mu_prior=mean(prior)
prior_precision = pow(variance(prior),-1)
precision_x = pow(sigma_x,-2)
posterior_mean = (prior_precision*mu_prior+precision_x*x)/(prior_precision+precision_x)
posterior_precision = prior_precision+precision_x
normal(posterior_mean,pow(posterior_precision,-1/2))
}
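/* Usage sketch (made-up numbers): updating a normal(0, 1) prior with an
observation x = 1 measured with noise sigma_x = 1 moves the mean halfway to
the observation and doubles the precision, giving normal(0.5, 1 / sqrt(2)). */
example_posterior_normal = posterior_normal(normal(0, 1), 1, 1)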
/* This function computes the expected value (ev) of obtaining perfect information about the cost-effectiveness of one intervention given the expected cost-effectiveness of the best alternative intervention.
- ce_new is the probability distribution encoding our current belief about the cost-effectiveness of the intervention under consideration in arbitrary units of goodness per dollar.
- ev_best_alternative is the expected value of the cost-effectiveness of the best alternative intervention
- budget is the number of dollars that will be invested into the better intervention
*/
ev_of_perfect_information(ce_new,ev_best_alternative,budget)={
max_ce_in_current_situation = max([ev_best_alternative,mean(ce_new)])
expected_max_ce_in_new_situation = mean(max(ce_new->SampleSet.fromDist,ev_best_alternative))
expected_gain_in_ce = expected_max_ce_in_new_situation - max_ce_in_current_situation
budget * expected_gain_in_ce
}
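/* Usage sketch (illustrative inputs): a new intervention whose cost-effectiveness
is believed to lie roughly between 0.5 and 2 units of goodness per dollar, a best
alternative with expected cost-effectiveness 1, and a budget of $10k. The result
is the expected gain, in units of goodness, from learning the new intervention's
true cost-effectiveness before allocating the budget. */
example_evpi = ev_of_perfect_information(0.5 to 2, 1, 10k)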
/* This function computes the expected monetary value of obtaining perfect information about an intervention's cost-effectiveness.
- ce_new is the probability distribution encoding our current belief about the cost-effectiveness of the intervention under consideration in arbitrary units of goodness per dollar.
- ev_best_alternative is the expected value of the cost-effectiveness of the best alternative intervention
- budget is the number of dollars that will be invested into the better intervention
*/
ev_of_perfect_information_in_usd(ce_new,ev_best_alternative,budget)={
ev_of_perfect_information(ce_new,ev_best_alternative,budget)/ev_best_alternative
}
//This function probabilistically estimates the number of waking hours per year
waking_hours_per_year()={
days_per_year = 365.25
hours_of_sleep = 5 to 9
waking_hours_per_day = 24 - hours_of_sleep
days_per_year * waking_hours_per_day
}
sd_of_SWLS_scale() = {
//The SWLS score is the sum of the scores of 5 items. In large samples of college students and large, representative samples of adults, its standard deviation ranges from 6.0 to 6.2 (Pavot & Diener, 2009).
normal_from_ci(6.0,6.2)
}
//convert an effect expressed in SD-years (standard deviations of increased well-being, sustained for one year) to WELLBYs
sd_to_WELLBY(effect_in_SDs) = {
//One WELLBY is a one-point increase on the Satisfaction with Life Scale (SWLS; Diener et al., 1985) for one year.
effect_in_SDs*sd_of_SWLS_scale()
}
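// Usage sketch (made-up effect): a gain of 0.1 standard deviations of
// well-being sustained for one year, expressed in WELLBYs.
example_wellbys = sd_to_WELLBY(0.1)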
//Convert hours of happiness into well-being adjusted life years
hours_of_happiness_to_WELLBYs(hh) ={
days_per_year = 365.25
hours_of_sleep = 5 to 9
waking_hours_per_day = 24 - hours_of_sleep
waking_hours_per_year = days_per_year * waking_hours_per_day
years_of_happiness = hh / waking_hours_per_year
sd_to_WELLBY(years_of_happiness)
}
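// Usage sketch: 100 hours of happiness expressed in WELLBYs; with roughly
// 6,000 waking hours per year this comes out on the order of 0.1 WELLBYs.
example_wellbys_from_hours = hours_of_happiness_to_WELLBYs(100)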
//Convert an effect of one SD-year of increased well-being per $1000 to hours of happiness per dollar
sd_years_per_1000USD_to_hh_per_dollar(sd_year_per_1000USD) ={
days_per_year = 365.25
hours_of_sleep = 5 to 9
waking_hours_per_day = 24 - hours_of_sleep
waking_hours_per_year = days_per_year * waking_hours_per_day
sd_year_per_1000USD / 1000 * waking_hours_per_year
}
//convert WELLBYs per $1000 to hours of happiness per dollar
wellbys_per_1000USD_to_hh_per_dollar(wellby) = {
wellby_per_dollar = wellby / 1000
sd_per_dollar = wellby_per_dollar / sd_of_SWLS_scale()
sd_per_dollar * waking_hours_per_year()
}
p_std_from_estimate(std_estimate,n)={
/*Quantifying the uncertainty about this estimate based on the number of
participants (n) according to
https://en.wikipedia.org/wiki/Unbiased_estimation_of_standard_deviation
*/
c4 = 1 - 1/(4*n)-7/(32*n^2) - 19/(128*n^3)
se_std = std_estimate* pow(1-c4^2,1/2)
std_estimate-1.96*se_std to std_estimate+1.96*se_std
}
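/* Usage sketch (illustrative inputs): the uncertainty around a sample
standard deviation of 2 estimated from 30 participants. */
example_p_std = p_std_from_estimate(2, 30)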
// Summing up the effect over time assuming an exponential decay
// This is computed as the analytic solution to the time integral of initial_effect*2^(-t/half_life)
sum_over_time(initial_effect,half_life) = initial_effect*half_life/log(2) //initial_effect * 1/(1-0.5^(1/half_life))
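// Worked sketch: an effect that starts at 1 and halves every 2 years sums to
// 2 / log(2), roughly 2.9 effect-units-times-years, over an infinite horizon.
example_summed_effect = sum_over_time(1, 2)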
//Net effect of developing a new intervention with cost-effectiveness ce_new at a given cost (cost_of_research)
//on the total well-being of humanity.
net_effect_of_research(ce_new,ce_prev,investment_in_new_intervention,cost_of_research) = {
//Is the new intervention more cost-effective than the previous one? If so, by how much?
increase_in_ce = max(ce_new-ce_prev,0)
//How much more well-being can the new intervention create than the previous one?
benefit = investment_in_new_intervention*increase_in_ce
/* Opportunity cost of developing the new intervention instead of
investing the research money into deploying the previous intervention. */
cost = cost_of_research*ce_prev
// The value of developing a new intervention is the benefit of having it
// minus the cost of developing it
benefit-cost
}
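/* Usage sketch (made-up inputs): a candidate intervention believed to create
roughly 0.5 to 2 units of goodness per dollar versus a previous best of 1,
with $1M available for deployment and $100k of research cost. */
example_net_effect = net_effect_of_research(0.5 to 2, 1, 1M, 100k)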
//How much money could be invested in the new intervention?
investment_in_new_intervention(scalability,budget,cost_of_research) ={
min(scalability, budget-cost_of_research)
}
// Total well-being benefits of one act of kindness on the benefactor and the beneficiary
benefit_of_one_kind_act() = {
/* How much happier is someone right after someone did something kind for them?
The estimate is based on the studies by Pressman et al. (2015) and Zhao and Epley (2021).
*/
//Based on the increase in Duchenne smiles observed by Pressman, Kraft, and Cross (2015). The effect size reported in that study was 1.1; I estimate the standard error of that effect size to be about 0.3.
effect_of_kindness_on_smiles = normal_from_ci(1.1-1.96*0.3,1.1+1.96*0.3) // normal distribution with 95% CI [0.5,1.7]
// Effect of compliments: in Experiments 1 and 2 of Zhao & Epley (2021), being complimented boosted the recipients' mood by 0.66 standard deviations.
effect_of_compliment_on_mood = normal(0.66,0.20)
// To express the assumption that each empirically documented effect size is equally likely to occur when someone benefits from prosocial behavior, we create a mixture distribution.
peak_amplitude_of_joy_beneficiary = mx(effect_of_kindness_on_smiles, effect_of_compliment_on_mood)
//How much happier does a person feel right after being kind to someone else?
peak_amplitude_of_joy_benefactor = normal_from_ci(0.16,0.41) //95% Gaussian posterior credible interval: [0.16,0.41] according to Curry et al. (2018)
//Average duration of joy according to Verduyn, van Mechelen, & Tuerlinckx (2011)
avg_duration_joy_h = lognormal_from_mean_and_std(1.7267,0.1086)
//I computed the total joy per unit event as the area under the joy intensity profile shown in Figure 6 of Verduyn, P., Van Mechelen, I., Tuerlinckx, F., Meers, K., & Van Coillie, H. (2009), using numerical integration with the trapezoid rule in Matlab (trapz). Because a unit event lasts one unit of time, the result is also the average level of joy during a unit event.
avg_intensity_of_joy = normal(0.4796,0.1) // The mean value is the area under the curve in Figure 6. The standard deviation estimates the effect of the unreported uncertainty. Given that this number corresponds to the average across 260 graphs, I believe this estimate to be high enough.
hours_of_happiness_per_SD_joy = avg_duration_joy_h * avg_intensity_of_joy
//Sum up the beneficiary's increase in affective well-being over time in hours of happiness.
//Arguably, the beneficiary's emotional response might be closer to gratitude than joy. However, according to Verduyn & Lavrijsen (2015), the durations of joy and gratitude are similar.
total_effect_of_one_act_of_kindness_on_beneficiary = peak_amplitude_of_joy_beneficiary * hours_of_happiness_per_SD_joy
//Sum up the benefactor's increase in affective well-being over time in hours of happiness.
total_effect_of_one_act_of_kindness_on_benefactor = peak_amplitude_of_joy_benefactor * hours_of_happiness_per_SD_joy
//the total benefit is the sum of the benefits to the benefactor and the beneficiary
total_effect_of_one_act_of_kindness_on_beneficiary + total_effect_of_one_act_of_kindness_on_benefactor
}
//Cost of deploying an online intervention through advertising in USD per person reached
cost_of_online_intervention_per_person_reached() ={
//Cost of bringing people into the digital intervention through online advertising
//https://www.businessofapps.com/ads/cpi/research/cost-per-install/
cost_per_install_in_usd = SampleSet.fromList([0.93,1.03,0.34,5.28,3.6,1.22,4.3,1.15,1.04])
//How many people complete the intervention per person who clicks on the ad?
//A meta-analysis of observational studies found that 49% (95% CI: 27%-70%) of users
//of mobile health interventions drop out of the intervention without completing it.
//https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7556375/
rate_of_followthrough = normal_from_ci(1-0.70,1-0.27)//uniform(0.05,0.5)
//The cost per person who completes the intervention is the cost per install divided by the rate of follow-through.
cost_per_install_in_usd / rate_of_followthrough
}
//the total amount of money that can be invested into a new online intervention
scalability_of_online_interventions(relative_size_of_target_group) = {
/*
Sources:
- https://www.statista.com/statistics/617136/digital-population-worldwide/
- https://datareportal.com/global-digital-overview
*/
number_of_internet_users = SampleSet.fromList([5.03B, 5.07B, 5.03B, 5.07B, 5.03B, 5.07B, 5.03B, 5.07B])
// The number of people in the target group.
size_of_target_group = relative_size_of_target_group*number_of_internet_users
// We assume that an online advertising campaign can reach between 0.1% and 10% of the relevant demographic.
//The remainder of them will never click on the ad advertising our intervention.
proportion_reachable_by_ads = 0.001 to 0.1
//The number of people in the relevant demographic group who can be reached by the intervention through online advertising
nr_people_reachable = proportion_reachable_by_ads * size_of_target_group
//The maximum amount of money that can be invested into the intervention is the cost per person times
//the maximum number of people to whom the intervention can be deployed.
nr_people_reachable * cost_of_online_intervention_per_person_reached()
}
//This function estimates the effect size for an experiment with a pre-post design with a control group according to Equation 8 in Morris (2008).
effect_size_ppc(m_post_t,m_pre_t,m_post_c,m_pre_c,sd_pre_t,sd_pre_c,n_t,n_c) = {
c_p = 1 - 3/(4 * (n_t+n_c-2) -1 )
sd_pre = pow( ((n_t-1)*pow(sd_pre_t,2)+(n_c-1)*(pow(sd_pre_c,2))) / (n_t+n_c-2) , 1/2)
c_p* ( (m_post_t-m_pre_t)-(m_post_c-m_pre_c) )/(sd_pre)
}
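/* Worked sketch (synthetic summary statistics): a treatment group improving
from 10 to 12 while the control group stays at 10, with pre-test SDs of 2 and
50 participants per group, gives an effect size of about (12 - 10) / 2 = 1,
shrunk slightly by the small-sample correction c_p. */
example_d_ppc = effect_size_ppc(12, 10, 10, 10, 2, 2, 50, 50)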
//This function estimates the standard error of the effect size for an experiment with a pre-post design with a control group according to Equation 25 in Morris (2008).
se_effect_size_ppc(m_post_t,m_pre_t,m_post_c,m_pre_c,sd_pre_t,sd_pre_c,n_t,n_c,rho) = {
c_p = 1 - 3/(4 * (n_t+n_c-2) -1 )
delta = effect_size_ppc(m_post_t,m_pre_t,m_post_c,m_pre_c,sd_pre_t,sd_pre_c,n_t,n_c)
first_factor = 2*pow(c_p,2)*(1-rho)
second_factor = (n_t+n_c) / (n_t*n_c)
third_factor = (n_t+n_c-2) / (n_t+n_c-4)
fourth_factor = 1 + pow(delta,2) / ( (2*(1-rho))*second_factor )
complex_product = first_factor*second_factor*third_factor*fourth_factor
var_d_ppc2 = complex_product - delta^2
pow( var_d_ppc2, 1/2)
}
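// Companion sketch: the standard error of the effect size above, assuming
// (hypothetically) a pre-post correlation rho of 0.5.
example_se_d_ppc = se_effect_size_ppc(12, 10, 10, 10, 2, 2, 50, 50, 0.5)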
// effect of volunteering in a nursing home for 1 year on the total wellbeing of the seniors in WELLBYs
effect_of_volunteering_WELLBYs() = {
//Wheeler, Gorey, & Greenblatt (1998) found that the average duration of volunteering was about 1 year.
avg_duration_in_years = 1
// According to Frazier, Birmingham, Wheat, and Georges (2019) the RSVP program had about 174000 volunteers who served a total of 7000000 seniors in 1993.
nr_seniors_per_volunteer = 7000000/174000
//Wheeler, Gorey, & Greenblatt (1998)'s meta-analysis of 9 studies that measured the effect of volunteer programs on the well-being of nursing home residents yielded a 95% confidence interval of 0.372 to 0.544.
increase_in_seniors_SWB = normal_from_ci(0.372,0.544)
//The total number of WELLBYs created by each volunteer is the number of seniors served times the increase in their well-being times the duration of this effect
nr_seniors_per_volunteer * sd_to_WELLBY(increase_in_seniors_SWB * avg_duration_in_years)
}
//This function estimates the cost-effectiveness of volunteering in a nursing home in WELLBYs per 100h of volunteering.
ce_volunteering_WELLBYs_per_100h() = {
//According to Wheeler, Gorey, and Greenblatt (1998), the average amount of time that volunteers invested was 4 hours per week.
nr_hours_per_week = 4
//The average duration for which they volunteered was 12 months.
nr_weeks = 52
time_investment = nr_weeks * nr_hours_per_week
//the cost-effectiveness is the ratio of the benefit over the costs.
effect_of_volunteering_WELLBYs() / (time_investment/100)
}
prevalence_of_secular_volunteering() = {
// According to Rotolo & Wilson (2011), the median prevalence of secular volunteering across all 50 states of the US was 0.195. This is based on a poll that was completed by 300000 US citizens.
median_volunteering_freq = 0.195
nr_people_surveyed = 300000
std_volunteering = pow(median_volunteering_freq*(1-median_volunteering_freq),1/2)
normal(median_volunteering_freq,std_volunteering/pow(nr_people_surveyed,1/2))
}
days_per_month() = SampleSet.fromList([31,28,31,30,31,30,31,31,30,31,30,31])
indirect_cost_rate() = 0.1 to 0.6
total_cost(direct_cost) ={
direct_cost*(1+indirect_cost_rate())
}
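// Usage sketch: the total cost of a project with $100k in direct costs after
// adding the uncertain indirect cost rate.
example_total_cost = total_cost(100k)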
postdoc_salary_costs_per_year() = 65000 to 80000
std_freq_of_prosocial_behavior_in_acts_per_day() = {
// estimate of the standard deviation reported in the article
std_estimate = 1.64
n = 77
p_std_from_estimate(std_estimate,n)
}
net_effect_of_research_with_limited_uptake(p_uptake,ce_new,ce_prev,investment_in_new_intervention,cost_of_research) = {
//Is the new intervention more cost-effective than the previous one? If so, by how much?
increase_in_ce = max(ce_new-ce_prev,0)
//How much more well-being can the new intervention create than the previous one?
benefit = p_uptake*investment_in_new_intervention*increase_in_ce
/* Opportunity cost of developing the new intervention instead of
investing the research money into deploying the previous intervention. */
cost = cost_of_research*ce_prev
// The value of developing a new intervention is the benefit of having it
// minus the cost of developing it
benefit-cost
}
`)