# -*- coding: utf-8 -*-
"""
WELFARE AND COST CHANGES - consumer surplus calculations

This file takes the output of the scenario model projections and calculates
consumer welfare values from the policy change, together with other key
metrics such as revenue, consumption and other charges.

Note that when the welfare measures calculated in this file are discounted -
to account for low but increasing rates of price pass-through when
calculating expenditure-based (compensating variation) welfare measures -
the descriptive decompositions conducted towards the end of this file are no
longer accurate, as they have not been adjusted for the discount.

Key inputs are files from the various runs.

@author: theot
"""

import numpy as np
import pandas as pd
import itertools

in_fn = "C:/Users/theot/Dropbox (Sense.)/2018-19 EA TPM CBA (Shared)/Step 3/AoB model/Output/All_major_capex_gen_sensitivities/"
out_fn = "C:/Users/theot/Dropbox (Sense.)/2018-19 EA TPM CBA (Shared)/Step 3/AoB model/Output/All_major_capex_gen_sensitivities/"

coef_m = pd.read_csv("C:/Users/theot/Dropbox (Sense.)/2018-19 EA TPM CBA (Shared)/Step 3/AoB model/Data/tou_coef_mass.csv")
coef_i = pd.read_csv("C:/Users/theot/Dropbox (Sense.)/2018-19 EA TPM CBA (Shared)/Step 3/AoB model/Data/tou_coef_ind.csv")

# Demand results
rcpd = pd.read_csv(in_fn + "rcpd.csv")
aob = pd.read_csv(in_fn + "aob.csv")

# Parameters
share_n = 4
ones = np.matrix(np.ones((1, share_n)))
r = 1.06
logistic_a = 0.12    # Intercept for logistic growth curve for welfare relationship to consumer participation
logistic_max = 0.88  # Limit for logistic growth curve for welfare relationship to consumer participation
logistic_b = 0.25    # Shape parameter for rate of penetration, captured in logistic growth curve
logistic_c = 10      # Interacts with intercept to set the starting value of the logistic growth curve = ~0.19
aob_yr = 2022

# Mass market Time of Use (TOU) elasticity/LA-AIDS parameters - from data
gamma = np.column_stack(np.array(coef_m[coef_m['type'] == 'gamma']['coef']))
gamma = np.transpose(np.reshape(gamma, (4, 4)))
beta = np.column_stack(np.array(coef_m[coef_m['type'] == 'beta']['coef']))
beta = np.transpose(np.matrix(beta))
dg_c = np.column_stack(np.array(coef_m[coef_m['type'] == 'dg']['coef']))

# Large industrials TOU elasticity/LA-AIDS parameters - from data
gamma_i = np.column_stack(np.array(coef_i[coef_i['type'] == 'gamma']['coef']))
gamma_i = np.transpose(np.reshape(gamma_i, (4, 4)))
beta_i = np.column_stack(np.array(coef_i[coef_i['type'] == 'beta']['coef']))
beta_i = np.transpose(np.matrix(beta_i))
dg_c_i = np.column_stack(np.array(coef_i[coef_i['type'] == 'dg']['coef']))
twai_c_i = np.column_stack(np.array(coef_i[coef_i['type'] == 'tiwai']['coef']))
gamma_twai = np.multiply(gamma_i, twai_c_i)
beta_twai = np.multiply(beta_i, np.transpose(twai_c_i))

# Market aggregate price and demand parameters (non-TOU)
p_e = -0.11    # Aggregate mass market price (per ICP) elasticity
m_e = 0.11     # Aggregate mass market income (per ICP) elasticity
p_e_i = -0.02  # Aggregate industrial price (per ICP) elasticity

base_year = 2010

# Years to examine
yr_l = list(range(2022, 2050))  # Years for projections

# Nodes to examine
reg_l = ["MDN", "OTA", "HLY", "TRK", "WKM", "RDF", "SFD",
         "BPE", "HAY", "KIK", "ISL", "BEN", "ROX", "TWI"]  # Backbone nodes in model
ind_l = ["OTA", "HLY", "TRK", "RDF", "SFD", "BPE", "ISL", "TWI"]  # Subset of nodes with large industrial loads

# List to iterate on
reg_t = list(itertools.product(reg_l, yr_l))
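# The logistic parameters above are used when discounting welfare for partial
# price pass-through (see the note in the docstring). A minimal sketch of the
# curve they imply - the exact functional form is an assumption, since it is
# not applied in this file: the value starts at ~0.19 in the AoB start year
# and approaches logistic_max as participation grows.
def participation_curve(t):
    # t = years since aob_yr; ~0.19 at t = 0, rising towards 0.88 (assumed form)
    return logistic_a + (logistic_max - logistic_a) / (1 + logistic_c * np.exp(-logistic_b * t))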
def cv_f(collector, pre=rcpd, post=aob, reg="OTA", year=2017, ctype=1, max_inv=2, uplift=1.15):

    # (1) Get d ln(price), by year
    p0 = np.matrix(pre.loc[(pre['m_yr'] == year) & (pre['bb'] == reg) & (pre["type"] == ctype) &
                           (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift),
                           ['pk_p', 'dg_p', 'sh_p', 'off_p']].values)
    p1 = np.matrix(post.loc[(post['m_yr'] == year) & (post['bb'] == reg) & (post["type"] == ctype) &
                            (post['max_inv'] == max_inv) & (post['uplift'] == uplift),
                            ['pk_p', 'dg_p', 'sh_p', 'off_p']].values)
    px0 = np.matrix(pre.loc[(pre['m_yr'] == year) & (pre['bb'] == reg) & (pre["type"] == ctype) &
                            (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift),
                            ['pk_px', 'dg_px', 'sh_px', 'off_px']].values)
    px1 = np.matrix(post.loc[(post['m_yr'] == year) & (post['bb'] == reg) & (post["type"] == ctype) &
                             (post['max_inv'] == max_inv) & (post['uplift'] == uplift),
                             ['pk_px', 'dg_px', 'sh_px', 'off_px']].values)
    p1_px0 = np.add(np.subtract(p1, px1), px0)  # price with baseline generation and transport price
    dlnp = np.subtract(np.log(p1), np.log(p0))
    dlnp_px0 = np.subtract(np.log(p1_px0), np.log(p0))
    dp = np.subtract(p1, p0)
    dp_px0 = np.subtract(p1_px0, p0)
    dp_pcnt = np.divide(p1, p0) - 1
    dp_px0_pcnt = np.divide(p1_px0, p0) - 1
    q0 = np.matrix(pre.loc[(pre['m_yr'] == year) & (pre['bb'] == reg) & (pre["type"] == ctype) &
                           (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift),
                           ['pk_q', 'dg_q', 'sh_q', 'off_q']].values)
    q1 = np.matrix(post.loc[(post['m_yr'] == year) & (post['bb'] == reg) & (post["type"] == ctype) &
                            (post['max_inv'] == max_inv) & (post['uplift'] == uplift),
                            ['pk_q', 'dg_q', 'sh_q', 'off_q']].values)
    dq = np.subtract(q1, q0)
    dq_pcnt = np.divide(q1, q0) - 1

    # (2) Get shares (s0), plus base shares (sb)
    sb = np.matrix(pre.loc[(pre['bb'] == reg) & (pre['type'] == ctype) &
                           (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift) &
                           (pre['p_yr'] == base_year), ['pk_s', 'dg_s', 'sh_s', 'off_s']])
    s0 = np.matrix(pre.loc[(pre['bb'] == reg) & (pre['type'] == ctype) &
                           (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift) &
                           (pre['m_yr'] == year), ['pk_s', 'dg_s', 'sh_s', 'off_s']])
    s1 = np.matrix(post.loc[(post['bb'] == reg) & (post['type'] == ctype) &
                            (post['max_inv'] == max_inv) & (post['uplift'] == uplift) &
                            (post['m_yr'] == year), ['pk_s', 'dg_s', 'sh_s', 'off_s']])  # fixed: filter on post, not pre

    # (3) Get elasticities
    if ctype == 1:
        elas_tou = (-np.eye(share_n) + (gamma / (np.transpose(s0) * ones)) -
                    (np.multiply(beta * ones, np.transpose(ones) * sb) /
                     (np.transpose(s0) * ones)))
        # Scale the TOU elasticities by the aggregate market elasticity, to get a total effect
        elas = elas_tou * abs(p_e)
        inc_elas = np.tile(1 - np.divide(np.transpose(beta), s0), (share_n, 1))
    else:
        elas_tou = (-np.eye(share_n) + (gamma_i / (np.transpose(s0) * ones)) -
                    (np.multiply(beta_i * ones, np.transpose(ones) * sb) /
                     (np.transpose(s0) * ones)))
        elas = elas_tou * abs(p_e_i)
        inc_elas = np.tile(1 - np.divide(np.transpose(beta_i), s0), (share_n, 1))

    # (4) Hicksian elasticities: elas(i,j) + exp_elas * s0 * assumed total budget share
    bs = 0.03  # Assumed total budget share
    hicks = np.add(np.transpose(elas), np.multiply(inc_elas, s0) * bs)
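    # Added commentary (not in the original): `hicks` applies the Slutsky
    # equation, e^h_ij = e_ij + eta_i * s_j, scaled by the assumed share of
    # electricity in the total budget (bs). The compensated elasticities feed
    # the second-order substitution term of the welfare decomposition below,
    #     CV/exp0 ~= sum_i s_i*dlnp_i + 0.5*sum_i sum_j s_i*e^h_ij*dlnp_i*dlnp_j
    # i.e. the `direct` and `sub` terms computed in step (11).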
    # (5) Log change in price weighted by budget share pre-TPM change
    s0_dlnp = np.multiply(s0, dlnp)
    s0_dlnp_px0 = np.multiply(s0, dlnp_px0)
    s0_dp = np.multiply(s0, dp_pcnt)          # CS measure
    s0_dp_px0 = np.multiply(s0, dp_px0_pcnt)  # CS measure, px0

    # (6) Change in relative purchasing power
    dlnpi_dlnpj = np.matmul(np.transpose(dlnp), dlnp)
    dlnpi_dlnpj_px0 = np.matmul(np.transpose(dlnp_px0), dlnp_px0)
    dpi_dpj = np.matmul(np.transpose(dp_pcnt), dp_pcnt)
    dpi_dpj_px0 = np.matmul(np.transpose(dp_px0_pcnt), dp_px0_pcnt)

    # (7) Hicksian (compensated) response to price change
    hicks_change = np.multiply(hicks, dlnpi_dlnpj)
    hicks_change_px0 = np.multiply(hicks, dlnpi_dlnpj_px0)
    elas_change = np.multiply(np.transpose(elas), dpi_dpj)
    elas_change_px0 = np.multiply(np.transpose(elas), dpi_dpj_px0)

    # (8) Weighted by initial budget shares
    wghtd_hicks_change = np.multiply(np.tile(s0, (share_n, 1)), hicks_change)
    wghtd_hicks_change_px0 = np.multiply(np.tile(s0, (share_n, 1)), hicks_change_px0)
    wghtd_elas_change = np.multiply(np.tile(s0, (share_n, 1)), elas_change)
    wghtd_elas_change_px0 = np.multiply(np.tile(s0, (share_n, 1)), elas_change_px0)

    # Expenditure pre and post the policy change
    exp0 = pre.loc[(pre['m_yr'] == year) & (pre['bb'] == reg) & (pre["type"] == ctype) &
                   (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift), ['exp_all']].values
    exp1 = post.loc[(post['m_yr'] == year) & (post['bb'] == reg) & (post["type"] == ctype) &
                    (post['max_inv'] == max_inv) & (post['uplift'] == uplift), ['exp_all']].values

    # (9) Consumer surplus (rule-of-half: dp*q0 + 0.5*dp*dq)
    cs = np.sum(np.add(np.multiply(dp, q0), np.multiply(dp, dq) * 0.5))
    cs_px0 = np.sum(np.add(np.multiply(dp_px0, q0), np.multiply(dp_px0, dq) * 0.5))
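    # Illustrative arithmetic for the rule-of-half above (hypothetical
    # numbers, not from the data): if a price rises by dp = 2 and demand
    # falls from q0 = 100 to q1 = 90 (dq = -10), the surplus change is
    # 2*100 + 0.5*2*(-10) = 190, i.e. the full price change on retained
    # consumption plus half of it on the forgone consumption.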
    # (10) Willingness to pay to avoid the policy change (compensating variation).
    # Maximum compensation = change in expenditure given old expenditure, old
    # quantities and new prices. This measure is used because very large
    # percentage price changes can lead, in a handful of cases, to
    # theoretically implausible compensating variations that are larger than
    # the change in expenditure at old quantities and new prices. It enforces
    # the budget constraint (for industrial loads where the models suggest
    # little/no substitution possibilities remain).
    cv = np.divide(cs, exp0).reshape(-1)
    cv_px0 = np.divide(cs_px0, exp0).reshape(-1)
    welf = (-cv * exp0).reshape(-1)
    welf_px0 = (-cv_px0 * exp0).reshape(-1)

    # (11) Decompose effects
    rev0 = np.sum(np.matrix(pre.loc[(pre['m_yr'] == year) & (pre['bb'] == reg) & (pre["type"] == ctype) &
                                    (pre['max_inv'] == max_inv) & (pre['uplift'] == uplift),
                                    ['pk_rev', 'dg_rev', 'sh_rev', 'off_rev']].values))
    rev1 = np.sum(np.matrix(post.loc[(post['m_yr'] == year) & (post['bb'] == reg) & (post["type"] == ctype) &
                                     (post['max_inv'] == max_inv) & (post['uplift'] == uplift),
                                     ['pk_rev', 'dg_rev', 'sh_rev', 'off_rev']].values))
    drev = np.subtract(rev1, rev0)
    dexp = np.subtract(exp1, exp0)

    # First-order (direct) effect and second-order (substitution) effect
    direct = (-np.sum(s0_dlnp) * exp0).reshape(-1)
    sub = (-np.sum(np.sum(wghtd_hicks_change)) * 0.5 * exp0).reshape(-1)
    direct_pcnt = -np.sum(s0_dlnp)
    sub_pcnt = -np.sum(np.sum(wghtd_hicks_change)) * 0.5
    cv_pcnt = -cv
    direct_px0 = (-np.sum(s0_dlnp_px0) * exp0).reshape(-1)
    sub_px0 = (-np.sum(np.sum(wghtd_hicks_change_px0)) * 0.5 * exp0).reshape(-1)  # fixed: previously reshaped `sub` by mistake
    direct_pcnt_px0 = -np.sum(s0_dlnp_px0)
    sub_pcnt_px0 = -np.sum(np.sum(wghtd_hicks_change_px0)) * 0.5
    cv_pcnt_px0 = -cv_px0

    # (12) Result
    node = pd.Series(reg)
    bb_type = pd.Series(ctype)
    yr = pd.Series(year)
    # Changes in prices and quantities
    dq_val = pd.DataFrame(dq, columns=['pk_dq', 'dg_dq', 'sh_dq', 'off_dq'])
    dp_val = pd.DataFrame(dp, columns=['pk_dp', 'dg_dp', 'sh_dp', 'off_dp'])
    dlnp_val = pd.DataFrame(dlnp, columns=['pk_dlnp', 'dg_dlnp', 'sh_dlnp', 'off_dlnp'])
    dp_px0_val = pd.DataFrame(dp_px0, columns=['pk_dp_px0', 'dg_dp_px0', 'sh_dp_px0', 'off_dp_px0'])
    dlnp_px0_val = pd.DataFrame(dlnp_px0, columns=['pk_dlnp_px0', 'dg_dlnp_px0', 'sh_dlnp_px0', 'off_dlnp_px0'])
    # Changes in expenditure shares
    s0_val = pd.DataFrame(s0, columns=['pk_s0', 'dg_s0', 'sh_s0', 'off_s0'])
    s1_val = pd.DataFrame(s1, columns=['pk_s1', 'dg_s1', 'sh_s1', 'off_s1'])
    # Revenue and expenditure, and changes in both
    # (.item() replaces the deprecated np.asscalar, removed in recent NumPy)
    exp_val = pd.Series(exp0.item())
    exp1_val = pd.Series(exp1.item())
    dexp_val = pd.Series(dexp.item())
    rev0_val = pd.Series(rev0.item())
    rev1_val = pd.Series(rev1.item())
    drev_val = pd.Series(drev.item())
    # Welfare impacts and decomposition
    cv_val = pd.Series(welf)
    cv_pcnt_val = pd.Series(cv_pcnt)
    direct_val = pd.Series(direct)
    sub_val = pd.Series(sub)
    direct_pcnt_val = pd.Series(direct_pcnt)
    sub_pcnt_val = pd.Series(sub_pcnt)
    # Welfare impacts and decomposition holding generation and transport costs constant
    cv_px0_val = pd.Series(welf_px0)
    cv_pcnt_px0_val = pd.Series(cv_pcnt_px0)
    direct_px0_val = pd.Series(direct_px0)
    sub_px0_val = pd.Series(sub_px0)
    direct_pcnt_px0_val = pd.Series(direct_pcnt_px0)
    sub_pcnt_px0_val = pd.Series(sub_pcnt_px0)
    # Welfare impacts from allowing generation and transport prices to change
    cv_dpx0_val = pd.Series(np.subtract(welf, welf_px0))
    cv_pcnt_dpx0_val = pd.Series(np.subtract(cv_pcnt, cv_pcnt_px0))
    direct_dpx0_val = pd.Series(np.subtract(direct, direct_px0))
    sub_dpx0_val = pd.Series(np.subtract(sub, sub_px0))
    direct_pcnt_dpx0_val = pd.Series(np.subtract(direct_pcnt, direct_pcnt_px0))
    sub_pcnt_dpx0_val = pd.Series(np.subtract(sub_pcnt, sub_pcnt_px0))
    uplift_val = pd.Series(uplift)
    max_inv_val = pd.Series(max_inv)

    out = pd.concat([node, bb_type, yr, dq_val, dp_val, dlnp_val,
                     dp_px0_val, dlnp_px0_val,
                     s0_val, s1_val,
                     exp_val, exp1_val, dexp_val,
                     rev0_val, rev1_val, drev_val,
                     cv_val, cv_pcnt_val, direct_val, sub_val, direct_pcnt_val, sub_pcnt_val,
                     cv_px0_val, cv_pcnt_px0_val, direct_px0_val, sub_px0_val,
                     direct_pcnt_px0_val, sub_pcnt_px0_val,
                     cv_dpx0_val, cv_pcnt_dpx0_val, direct_dpx0_val, sub_dpx0_val,
                     direct_pcnt_dpx0_val, sub_pcnt_dpx0_val, uplift_val, max_inv_val], axis=1)
    out.columns = out_cols  # column labels shared with the collector, defined below
    # DataFrame.append was removed in pandas 2.0; concat is the replacement
    return pd.concat([collector, out], ignore_index=True)


# Collector
out_cols = ['bb', 'type', 'year', 'pk_dq', 'dg_dq', 'sh_dq', 'off_dq',
            'pk_dp', 'dg_dp', 'sh_dp', 'off_dp',
            'pk_dlnp', 'dg_dlnp', 'sh_dlnp', 'off_dlnp',
            'pk_dp_p0', 'dg_dp_p0', 'sh_dp_p0', 'off_dp_p0',
            'pk_dlnp_p0', 'dg_dlnp_p0', 'sh_dlnp_p0', 'off_dlnp_p0',
            'pk_s0', 'dg_s0', 'sh_s0', 'off_s0',
            'pk_s1', 'dg_s1', 'sh_s1', 'off_s1',
            'exp0', 'exp1', 'dexp', 'rev0', 'rev1', 'drev',
            'cv', 'cv_pcnt', 'direct', 'sub', 'direct_pcnt', 'sub_pcnt',
            'cv_p0', 'cv_pcnt_p0', 'direct_p0', 'sub_p0', 'direct_pcnt_p0', 'sub_pcnt_p0',
            'cv_dp0', 'cv_pcnt_dp0', 'direct_dp0', 'sub_dp0', 'direct_pcnt_dp0', 'sub_pcnt_dp0',
            'uplift', 'max_inv']
cv_results = pd.DataFrame(columns=out_cols)

# Uplift and investment-cap parameter scenarios
uplift_l = [1.0, 1.075, 1.15, 1.225, 1.30]
max_inv_l = [1, 2, 5]

# Mass market consumers (ctype=1) at all backbone nodes
for reg, year, uplift_s, max_inv_s in itertools.product(reg_l, yr_l, uplift_l, max_inv_l):
    cv_results = cv_f(cv_results, pre=rcpd, post=aob, reg=reg, year=year, ctype=1,
                      uplift=uplift_s, max_inv=max_inv_s)
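# Large industrial loads (ctype=2) are modelled only at the subset of
# backbone nodes in ind_l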
for reg, year, uplift_s, max_inv_s in itertools.product(ind_l, yr_l, uplift_l, max_inv_l):
    cv_results = cv_f(cv_results, pre=rcpd, post=aob, reg=reg, year=year, ctype=2,
                      uplift=uplift_s, max_inv=max_inv_s)

# Discount annual welfare measures to present values (base year 2019, r = 1.06)
cv_results['npv_cv'] = cv_results.cv * (1 / (r ** (cv_results.year - 2019)))
cv_results['npv_cv_p0'] = cv_results.cv_p0 * (1 / (r ** (cv_results.year - 2019)))
cv_results['npv_cv_dp0'] = cv_results.cv_dp0 * (1 / (r ** (cv_results.year - 2019)))

npv = cv_results.groupby(['uplift', 'max_inv']).agg({'npv_cv': 'sum'})
npv_p0 = cv_results.groupby(['uplift', 'max_inv']).agg({'npv_cv_p0': 'sum'})
npv_dp0 = cv_results.groupby(['uplift', 'max_inv']).agg({'npv_cv_dp0': 'sum'})

cv_results.to_csv(out_fn + "cs_results.csv")
npv.to_csv(out_fn + "npv_results_cs.csv")
npv_p0.to_csv(out_fn + "npv_p0_results_cs.csv")
npv_dp0.to_csv(out_fn + "npv_dp0_results_cs.csv")
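
# A minimal sanity check of the discounting above (added sketch, not part of
# the original outputs): the factor applied is 1/r**(year - 2019), so a 2022
# value is scaled by 1/1.06**3 ~= 0.8396
assert abs(1 / (r ** (2022 - 2019)) - 0.8396) < 1e-3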