{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "%pylab inline\n", "from datetime import date, datetime, time, timedelta" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# UTS period (this is the period agreed by the EA board)\n", "UTS_start = \"2019/12/3\"\n", "UTS_end = \"2019/12/18\"\n", "date_format = \"%Y/%m/%d\"\n", "UTS_start_date = datetime.strptime(UTS_start, date_format)\n", "UTS_end_date = datetime.strptime(UTS_end, date_format) + timedelta(minutes=60*23+30)\n", "UTS_next_date = datetime.strptime(UTS_end, date_format) + timedelta(days=1)\n", "UTS_days = (UTS_end_date - UTS_start_date) / timedelta(days=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#set input folder\n", "input_folder = r''\n", "\n", "#EA published folder\n", "path = r\"/\"\n", "\n", "# get some nice colours and define better legend and plot formatting...\n", "\n", "c_p = {'rd2': (0.9647058823529412, 0.5450980392156862, 0.6352941176470588), 'yl1': (1.0, 0.7294117647058823, 0.17647058823529413), 'gy2': (0.9098039215686274, 0.9098039215686274, 0.9098039215686274), 'bl2': (0.7176470588235294, 0.8901960784313725, 0.9725490196078431), 'yl2': (0.984313725490196, 0.8196078431372549, 0.5450980392156862), 'br1': (0.32941176470588235, 0.0, 0.0), 'bl1': (0.058823529411764705, 0.4823529411764706, 0.6862745098039216), 'rd1': (0.9294117647058824, 0.09019607843137255, 0.27058823529411763), 'gy1': (0.8235294117647058, 0.8235294117647058, 0.8235294117647058), 'br2': (0.6627450980392157, 0.5019607843137255, 0.5019607843137255)}\n", "c_s = {'og1': (0.8235294117647058, 0.38823529411764707, 0.10196078431372549), 'yl1': (0.803921568627451, 0.8156862745098039, 0.0), 'or1': (1.0, 0.4823529411764706, 0.0), 'pp1': (0.27058823529411763, 0.1803921568627451, 0.6745098039215687), 'mv1': (0.0, 0.4588235294117647, 0.4235294117647059), 'rd2': (0.8470588235294118, 0.5882352941176471, 0.5019607843137255), 'pr1': (0.803921568627451, 0.34901960784313724, 1.0), 'pk2': (1.0, 0.7372549019607844, 0.8313725490196079), 'gr1': (0.00784313725490196, 0.4666666666666667, 0.2196078431372549), 'or2': (1.0, 0.7372549019607844, 0.5019607843137255), 'mv2': (0.5019607843137255, 0.7254901960784313, 0.7098039215686275), 'pp2': (0.6862745098039216, 0.6431372549019608, 0.7411764705882353), 'gn1': (0.3803921568627451, 0.7372549019607844, 0.22745098039215686), 'bu1': (0.0, 0.5568627450980392, 1.0), 'bl2': (0.5254901960784314, 0.7372549019607844, 0.8392156862745098), 'ge1': (0.0, 0.7019607843137254, 0.6196078431372549), 'yl2': (0.8980392156862745, 0.9058823529411765, 0.5019607843137255), 'bu2': (0.5019607843137255, 0.7725490196078432, 1.0), 'ge2': (0.5019607843137255, 0.8470588235294118, 0.807843137254902), 'rd1': (0.7019607843137254, 0.18823529411764706, 0.0), 'gy1': (0.5843137254901961, 0.5843137254901961, 0.5843137254901961), 'og2': (0.8901960784313725, 0.6901960784313725, 0.5490196078431373), 'gy2': (0.788235294117647, 0.788235294117647, 0.788235294117647), 'be2': (0.8862745098039215, 0.9490196078431372, 0.984313725490196), 'pk1': (1.0, 0.3411764705882353, 0.9607843137254902), 'be1': (0.7803921568627451, 0.9019607843137255, 0.9803921568627451), 'gr2': (0.5019607843137255, 0.7294117647058823, 0.7098039215686275), 'bl1': (0.058823529411764705, 0.4823529411764706, 0.6862745098039216), 'gn2': (0.6862745098039216, 0.8666666666666667, 0.611764705882353), 'pr2': (0.8980392156862745, 
0.6705882352941176, 1.0)}\n", "\n", "import matplotlib as mpl   # explicit imports - %pylab does not reliably provide mpl or cycler\n", "from cycler import cycler\n", "\n", "def colours():\n", "    \"\"\"Set the default colour cycle from the palettes above.\"\"\"\n", "    mpl.rcParams['axes.prop_cycle'] = cycler('color', [c_p['br1'], c_p['br2'],\n", "        c_p['bl1'], c_p['bl2'], c_p['yl1'], c_p['yl2'], c_p['rd1'], c_p['rd2'],\n", "        c_p['gy1'], c_p['gy2'], c_s['pp1'], c_s['pp2'], c_s['pr1'], c_s['pr2'],\n", "        c_s['gr1'], c_s['gr2'], c_s['gn1'], c_s['gn2'], c_s['pk1'], c_s['pk2'],\n", "        c_s['yl1'], c_s['yl2'], c_s['mv1'], c_s['mv2']])\n", "\n", "def legend_format(ax, cols=4, xpos=-0.021, ypos=-0.15, **kwargs):\n", "    \"\"\"Place the legend outside the plot.\"\"\"\n", "    ax.legend(loc=3,\n", "              bbox_to_anchor=(xpos, ypos),\n", "              ncol=cols,\n", "              frameon=False, **kwargs)\n", "\n", "def plot_formatting(ax, rot=False, **kwargs):\n", "    \"\"\"A few tricks for better-looking plots.\"\"\"\n", "    ax.grid(b=True, which='major', color='k', linestyle='-',\n", "            axis='y', alpha=0.6, clip_on=True, marker=None)\n", "    ax.grid(b=False, axis='x', which='both')\n", "    ax.set_frame_on(False)  # remove plot frame\n", "    ax.set_axisbelow(True)\n", "    ax.xaxis.tick_bottom()\n", "    if rot:\n", "        plt.xticks(ax.get_xticks(), rotation=90, **kwargs)\n", "    else:\n", "        plt.xticks(ax.get_xticks(), rotation=0, **kwargs)\n", "\n", "\n", "def plot_shading(ax, y_offset, run_text_list=['vSPD run 1', 'vSPD run 1', 'vSPD run 2', 'vSPD run 3',\n", "                                              'vSPD run 4', 'vSPD run 5', 'vSPD run 6', 'vSPD run 7']):\n", "    \"\"\"Shade charts for each time-period simulation.\"\"\"\n", "    # only shade 3-18 Dec (the UTS period)\n", "    ax.axvspan(datetime(2019, 12, 3), datetime(2019, 12, 8), color=c_p['rd2'], alpha=0.4)\n", "    ax.axvspan(datetime(2019, 12, 8), datetime(2019, 12, 19), color=c_p['rd2'], alpha=0.2)\n", "\n", "    #ax.axvspan(datetime(2019, 11, 9, 10), datetime(2019,12,2,16), color=c_p['rd2'], alpha=0.2)\n", "    #ax.axvspan(datetime(2019, 11, 15, 8), datetime(2019,11,19,20), color=c_p['rd2'], alpha=0.2)\n", "    #ax.axvspan(datetime(2019, 12, 2, 16), datetime(2019,12,8), color=c_p['rd2'], alpha=0.4)\n", "    #ax.axvspan(datetime(2019, 12, 8), datetime(2019,12,28), color=c_p['rd2'], alpha=0.2)\n", "    #ax.axvspan(datetime(2019, 12, 28), datetime(2019,12,31), color=c_p['rd2'], alpha=0.4)\n", "    #ax.axvspan(datetime(2019, 12, 31), datetime(2020,1,6), color=c_p['rd2'], alpha=0.2)\n", "    #ax.axvspan(datetime(2020, 1, 6), datetime(2020,1,16), color=c_p['rd2'], alpha=0.4)\n", "    #ax.text(datetime(2019,11,10), y_offset, run_text_list[1])\n", "    #ax.text(datetime(2019,11,23), y_offset, run_text_list[1])\n", "    #ax.text(datetime(2019,11,15), y_offset, run_text_list[2])\n", "    #ax.text(datetime(2019,12,3), y_offset, run_text_list[3])\n", "    #ax.text(datetime(2019,12,16), y_offset, run_text_list[4])\n", "    #ax.text(datetime(2019,12,27), y_offset, run_text_list[5], rotation=0)\n", "    #ax.text(datetime(2020,1,1), y_offset, run_text_list[6])\n", "    #ax.text(datetime(2020,1,8), y_offset, run_text_list[7])\n", "\n", "\n", "def UTS_period(ax):\n", "    \"\"\"UTS period plot shading...\"\"\"\n", "    ax.axvspan(datetime(2019, 12, 3), datetime(2019, 12, 18), color=c_p['rd2'], alpha=0.4)\n", "\n", "\n",
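"# Usage sketch (added comment - these helpers are applied to an existing axes; df here\n", "# stands for any time-indexed DataFrame and is hypothetical):\n", "#   colours()\n", "#   fig = plt.figure(1, figsize=[16, 12])\n", "#   ax = fig.add_subplot(111)\n", "#   df.plot(ax=ax)\n", "#   plot_formatting(ax)\n", "#   legend_format(ax, cols=4)\n", "\n",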
"files = {'brh_res': 'BranchResults_TP.csv',  # branch results are large - drop this entry for speed/RAM savings\n", "         'gen_res': 'OfferResults_TP.csv',\n", "         'bid_res': 'BidResults_TP.csv', 'MNC_res': 'MNodeConstraintResults_TP.csv',\n", "         'res_res': 'ReserveResults_TP.csv', 'nod_res': 'NodeResults_TP.csv',\n", "         'bus_res': 'BusResults_TP.csv', 'brC_res': 'BrConstraintResults_TP.csv',\n", "         'sum_res': 'SummaryResults_TP.csv', 'trd_res': 'TraderResults.csv',\n", "         'sys_res': 'SystemResults.csv', 'scr_res': 'ScarcityResults_TP.csv',\n", "         'isl_res': 'IslandResults_TP.csv'}\n", "\n", "def vSPD_loader(path, case, files=files, csv=True):\n", "    \"\"\"Load vSPD output data.\"\"\"\n", "    output = {}\n", "    if csv:\n", "        for k, v in files.items():\n", "            filename = path + case + '/' + case + '_' + v\n", "            print('Loading: ' + filename)\n", "            output[k] = pd.read_csv(filename, index_col=[0, 1], parse_dates=True)\n", "        return output\n", "    else:  # use FAST parquet datafiles\n", "        for k, v in files.items():\n", "            filename = path + case + '_' + v[:-4] + '.parquet'\n", "            print('Loading: ' + filename)\n", "            df = pd.read_parquet(filename)\n", "            df = df.groupby(level=[0, 1]).last()\n", "            output[k] = df\n", "        return output\n", "\n", "\n", "def piece_together_vSPD_runs(run1, run2, run3, run4, run5, run6, run7, files):\n", "    \"\"\"Given vSPD run data, output a dictionary of dataframes pieced together.\"\"\"\n", "    # (run, start, end) for each simulation window\n", "    segments = [\n", "        (run1, '2019/11/9T10', '2019/11/15T0730'),     # Manapouri spill only, vSPD run 1A\n", "        (run2, '2019/11/15T08', '2019/11/19T1930'),    # Manapouri/AVI/WTK spill, vSPD run 2\n", "        (run1, '2019/11/19T2000', '2019/12/02T1530'),  # Manapouri spill only, vSPD run 1B\n", "        (run3, '2019/12/02T1600', '2019/12/07T2330'),  # everyone spilling, vSPD run 3\n", "        (run4, '2019/12/08T0000', '2019/12/27T2330'),  # everyone spilling, vSPD run 4\n", "        (run5, '2019/12/28T0000', '2019/12/30T2330'),  # everyone spilling, vSPD run 5\n", "        (run6, '2019/12/31T0000', '2020/01/05T2330'),  # everyone spilling, vSPD run 6\n", "        (run7, '2020/01/06T1600', '2020/01/16T2330'),  # everyone spilling, vSPD run 7\n", "    ]\n", "    ALL_RUNS = {}\n", "    for k in files.keys():\n", "        # slice each run to its window, then join the windows end-to-end\n", "        # (the original vSPD counterfactual run result)\n", "        ALL_RUNS[k] = pd.concat([run[k].unstack()[a:b].stack() for run, a, b in segments])\n", "    return ALL_RUNS\n", "\n", "\n", "def get_hvdc_flow(df):\n", "    \"\"\"Given a branch flow dataframe, return HVDC flow (positive = northward).\"\"\"\n", "    BEN_HAY_BC = df['brh_res']['Flow (MW) (From->To)'].unstack().loc[:, ['BEN_HAY2.1', 'BEN_HAY1.1']].sum(axis=1)\n", "    HAY_BEN_BC = df['brh_res']['Flow (MW) (From->To)'].unstack().loc[:, ['HAY_BEN2.1', 'HAY_BEN1.1']].sum(axis=1)\n", "    df = (BEN_HAY_BC-HAY_BEN_BC)\n", "    return df\n", "\n", "\n",
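"# Added sketch: the parquet branch of vSPD_loader assumes '<case>_<name>.parquet' files\n", "# already exist. A one-off conversion from the vSPD CSV output could look like this\n", "# (our assumption - the original conversion step is not shown in this notebook):\n", "def convert_case_to_parquet(path, case, files=files):\n", "    \"\"\"Write each CSV result table for a case out as parquet, matching vSPD_loader's naming.\"\"\"\n", "    for k, v in files.items():\n", "        df = pd.read_csv(path + case + '/' + case + '_' + v, index_col=[0, 1], parse_dates=True)\n", "        df.to_parquet(path + case + '_' + v[:-4] + '.parquet')\n", "\n", "\n",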
"def calc_diff_sys_cost(bc, run):\n", "    \"\"\"Given a basecase vSPD run and a pieced-together run, return the difference in System Load Cost.\"\"\"\n", "    bc_sys_res = bc['sys_res'].sort_index().reset_index(level=1, drop=True)\n", "    run_sys_res = run['sys_res'].sort_index().reset_index(level=1, drop=True)\n", "    diff_sys_cost = bc_sys_res['SystemLoadCost ($)']-run_sys_res['SystemLoadCost ($)']\n", "    return diff_sys_cost\n", "\n", "\n", "def determine_BEN_generation(case, RMG_LSI, BEN_CUMEC, CUMECS_to_MW, lower_limit=340, upper_limit=760,\n", "                             LSI_GENS=['OHA2201 OHA0', 'OHB2201 OHB0', 'OHC2201 OHC0', 'BEN2202 BEN0', 'AVI2201 AVI0',\n", "                                       'WTK0111 WTK0', 'CYD2201 CYD0', 'ROX2201 ROX0', 'ROX1101 ROX0', 'MAN2201 MAN0']):\n", "    \"\"\"Given the vSPD case dict, RM generation data and the Benmore spillway constraints, determine possible\n", "    Benmore generation in Dec 2019.\n", "    We first get the LSI generation data from the vSPD simulation (df_gen). We then get the actual LSI\n", "    generation (minus Benmore) from RM data (RMG_LSI2), and take the difference between total vSPD LSI\n", "    generation and what was actually generated in Dec 2019. We clip this at Benmore's maximum generation\n", "    (for 2019) to get a potential synthesised Benmore generation time series, and subtract actual Benmore RM\n", "    generation to get the additional generation over and above what occurred (BEN_add).\n", "    This additional Benmore generation is then converted from MW to cumecs, and two new columns are added to a\n", "    copy of the BEN_CUMEC dataframe, BEN_CUMEC_new (which is returned for testing): 'New Benmore flow' adds\n", "    this new flow to the Benmore generation cumecs, and 'New Spillway flow' subtracts the same amount from the\n", "    spillway cumecs, so that total through-flow (spillway + generation) remains constant.\n", "    We then filter trading periods with the spillway logic, keeping in BEN_CUMEC_new only those periods that\n", "    satisfy the spillway constraints. We then calculate the difference in spillway flow (add_ben_spill) and\n", "    convert it to MW (add_ben_spill_MW). Finally, we return the new flows, the additional spill per trading\n", "    period in GWh, and the average additional MW over the UTS period (MWspill).\n", "    \"\"\"\n", "\n", "    df_gen = case['gen_res']['Generation (MW)'].unstack().loc[:, LSI_GENS]\n", "    df_gen.index = df_gen.index.map(lambda x: x-timedelta(seconds=60*15))  # time shift by 15 minutes.\n", "\n", "    RMG_LSI2 = RMG_LSI.drop(columns='BEN2202')\n", "    BEN_ADDed = (df_gen.sum(axis=1)[\"2019/12\"])-(RMG_LSI2.sum(axis=1)[\"2019/12\"])\n", "    BEN_ADDed_clipped = BEN_ADDed.clip(upper=BEN_CUMEC['BenmoreMaxMW'])\n", "    # work out additional Benmore generation and convert to cumecs for testing against spillway constraints\n", "    BEN_add = (BEN_ADDed_clipped-RMG_LSI['BEN2202'])\n", "    BEN_add_CUMECS = BEN_add/CUMECS_to_MW  # ie, multiplied by 1.223.\n", "    print(BEN_add.mean())\n", "\n", "    # could add a clip(lower=0) here to ensure that Benmore generation doesn't decrease.\n", "    #BEN_add_CUMECS = BEN_add_CUMECS.clip(lower=0)\n", "    #print(BEN_add.mean())\n", "\n", "    #test = BEN_add_CUMECS/BEN_add # cumecs/MW should equal 1.223\n", "    #print(test)\n", "    # now we need to ensure spillway constraints are met\n", "    BEN_CUMEC_new = BEN_CUMEC.copy()\n", "    BEN_CUMEC_new['New Benmore flow'] = BEN_CUMEC_new.Generation + BEN_add_CUMECS\n", "    # this clip lower results in a lake level change.\n", "    BEN_CUMEC_new['New Spillway flow'] = (BEN_CUMEC_new.Spillway - BEN_add_CUMECS).clip(lower=0)\n", "    # keep only periods where the new spill is outside the no-go zone\n", "    BEN_CUMEC_new = BEN_CUMEC_new.loc[(BEN_CUMEC_new['New Spillway flow']<lower_limit) |\n", "                                      (BEN_CUMEC_new['New Spillway flow']>upper_limit), :]\n", "    # calculate difference in spill\n", "    add_ben_spill = (BEN_CUMEC_new['Spillway']-BEN_CUMEC_new['New Spillway flow']).clip(lower=0)\n", "    add_ben_spill_MW = add_ben_spill*CUMECS_to_MW\n", "    MWspill = int((add_ben_spill_MW.cumsum()/2).iloc[-1]/(UTS_days*24))  # average MW of additional spill over the UTS period\n", "\n", "    BEN_CUMEC_new = BEN_CUMEC_new.loc[BEN_CUMEC_new['Spillway']!=BEN_CUMEC_new['New Spillway flow'], :]\n", "    #print(BEN_CUMEC_new.head())\n", "    BEN_CUMEC_new = BEN_CUMEC_new.reindex(BEN_CUMEC.index)\n", "    return BEN_CUMEC_new, add_ben_spill_MW/2000.0, MWspill\n", "\n", "\n",
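"# Added usage sketch (not executed here - the inputs below are only loaded in later cells,\n", "# and the case passed in is our assumption; any pieced-together run dict with a 'gen_res'\n", "# table fits the signature):\n", "# BEN_CUMEC_new, add_spill_GWh, MWspill = determine_BEN_generation(\n", "#     ALL_RUNS_001MWh, RMG_LSI, BEN_CUMECS, CUMECS_to_MW)\n", "\n", "\n",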
"def plot_ben_flows(df_periods_new, offer):\n", "    \"\"\"Plot simulated vs actual Benmore through-flow, spill and generation.\"\"\"\n", "\n", "    # Logic check - total Benmore through-flow before and after should be equal\n", "    fig = plt.figure(1, figsize=[16,22])\n", "    ax = fig.add_subplot(311)\n", "    new_through_flow_check = df_periods_new['New Spillway flow'] + df_periods_new['New Benmore flow']\n", "    old_through_flow_check = df_periods_new['Generation'] + df_periods_new['Spillway']\n", "    df1 = pd.DataFrame({'New Benmore total through flow': new_through_flow_check,\n", "                        'Old Benmore total through flow': old_through_flow_check})\n", "    df1['New Benmore total through flow'].plot(ax=ax, color=c_s['or1'], lw=4, label='Simulated Benmore total flow')\n", "    df1['Old Benmore total through flow'].plot(ax=ax, color=c_p['br1'], lw=1, label='Actual Benmore total flow')\n", "    legend_format(ax, fontsize=16)\n", "    plot_formatting(ax)\n", "    ax.set_title(offer, fontsize=20)\n", "    ax.set_xlabel('')\n", "    ax.set_ylabel('Cumecs (m$^3$/s)', fontsize=18)\n", "    ax.legend([\"Simulated Benmore total flow\", \"Actual Benmore total flow\"], loc=3, bbox_to_anchor=(-0.025, -0.22), ncol=2, fontsize=16)\n", "\n", "    ax2 = fig.add_subplot(312)\n", "    df2 = pd.DataFrame({'New Spill': df_periods_new['New Spillway flow'],\n", "                        'Old Spill': df_periods_new['Spillway']})\n", "    df2.plot(ax=ax2, lw=3)\n", "\n", "    legend_format(ax2, fontsize=16)\n", "    plot_formatting(ax2)\n", "    ax2.set_xlabel('')\n", "    ax2.set_ylabel('Cumecs (m$^3$/s)', fontsize=18)\n", "    ax2.legend([\"Simulated spill\", \"Actual spill\"], loc=3, bbox_to_anchor=(-0.025, -0.22), ncol=2, fontsize=16)\n", "\n", "    ax3 = fig.add_subplot(313)\n", "    df3 = pd.DataFrame({'New Generation': df_periods_new['New Benmore flow'],\n", "                        'Old Generation': df_periods_new['Generation']})\n", "    df3.plot(ax=ax3, lw=3)\n", "    legend_format(ax3, fontsize=16)\n", "    plot_formatting(ax3)\n", "    ax3.set_xlabel('')\n", "    ax3.set_ylabel('Cumecs (m$^3$/s)', fontsize=18)\n", "    ax3.legend([\"Simulated generation\", \"Actual generation\"], loc=3, bbox_to_anchor=(-0.025, -0.22), ncol=2, fontsize=16)\n", "\n", "# look at generation for groups\n", "# generation node lists\n", "NI_TAUPO = ['ARA2201 ARA0', 'ATI2201 ATI0', 'ARI1101 ARI0', 'ARI1102 ARI0', 'KPO1101 KPO0',\n", "            'MTI2201 MTI0', 'OHK2201 OHK0', 'WPA2201 WPA0', 'WKM2201 WKM0']\n", "\n", "NI_HYDRO = ['ARA2201 ARA0', 'ATI2201 ATI0', 'ARI1101 ARI0', 'ARI1102 ARI0', 'KPO1101 KPO0',\n", "            'MTI2201 MTI0', 'OHK2201 OHK0', 'WPA2201 WPA0', 'WKM2201 WKM0', 'TUI1101 TUI0',\n", "            'TUI1101 PRI0', 'TUI1101 KTW0']\n", "\n", "NI_THERMAL = ['HLY2201 HLY1', 'HLY2201 HLY2', 'HLY2201 HLY4', 'GLN0332 GLN0', 'HLY2201 HLY5', 'HLY2201 HLY6',\n", "              'SFD0331', 'SFD2201 SFD21', 'SFD2201 SFD22', 'SFD2201 SPL0']\n", "\n", "SI_HYDRO = ['OHA2201 OHA0', 'OHB2201 OHB0', 'OHC2201 OHC0', 'BEN2202 BEN0', 'AVI2201 AVI0', 'WTK0111 WTK0',\n", "            'MAN2201 MAN0', 'ROX2201 ROX0', 'ROX1101 ROX0', 'CYD2201 CYD0']\n", "\n", "def return_generation(df, gen_list=None):\n", "    \"\"\"Given a case generation dataframe, return all generation in gen_list, time-shifted by 15 minutes.\"\"\"\n", "    df = df[df.index.isin(gen_list, level='Offer')]\n", "    df = df[['Generation (MW)']]\n", "    df = df.unstack()\n", "    df = df.xs(['Generation (MW)'], level=0, axis=1)\n", "    df.index = df.index.map(lambda x: x+timedelta(seconds=15*60))\n", "    return df\n", "\n", "\n", "def return_cum_MW_gen_diff(date_str, gen_list=None):\n", "    \"\"\"Return the cumulative generation difference in MW over the time period for each vSPD run.\"\"\"\n", "    G_BC = return_generation(BC['gen_res'], gen_list=gen_list)[date_str].sum(axis=1)\n", "    G_001MWh = return_generation(ALL_RUNS_001MWh['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    G_6pt35 = return_generation(ALL_RUNS_6pt35['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    G_10MWh = return_generation(ALL_RUNS_10MWh['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    G_20MWh = return_generation(ALL_RUNS_20MWh['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    G_30MWh = return_generation(ALL_RUNS_30MWh['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    G_35MWh = return_generation(ALL_RUNS_35MWh['gen_res'], gen_list=gen_list)[date_str].sum(axis=1) - G_BC\n", "    df = pd.DataFrame({'$0.01/MWh': G_001MWh,\n", "                       '$6.35/MWh': G_6pt35,\n", "                       '$10/MWh': G_10MWh,\n", "                       '$20/MWh': G_20MWh,\n", "                       '$30/MWh': G_30MWh,\n", "                       '$35/MWh': G_35MWh\n", "                       })\n", "\n", "    hours = (df.index[-1]-df.index[0]).total_seconds()/3600 + 0.5\n", "    df = df.cumsum()/2/hours\n", "    return df\n", "\n", "\n", "def plot_gen_change(df, ylabel='Cumulative average MW change (MW)',\n", "                    title='Waikato River generation change with low LSI offers'):\n", "    \"\"\"Plot generation changes between the different vSPD LSI offer runs.\"\"\"\n", "\n", "    fig = plt.figure(1, figsize=[16, 12])\n", "    ax = fig.add_subplot(111)\n", "    df.plot(ax=ax, lw=3)\n", "    ax.set_xlabel('', fontsize=16)\n", "    ax.set_ylabel(ylabel, fontsize=16)\n", "    ax.set_title(title, fontsize=20)\n", "    plot_formatting(ax)\n", "    legend_format(ax, cols=5)\n", "\n", "    # annotate each series with its final average MW and the equivalent GWh over the UTS period\n", "    for col in df.columns:\n", "        MW = df.iloc[-1].round()[col]\n", "        GWh = (df.iloc[-1][col]*24*UTS_days/1000).round()\n", "        ax.text(UTS_next_date, MW, str(MW) + 'MW / ' + str(GWh) + 'GWh', fontsize=12)\n", "\n", "\n", "# basecase files\n", "\n", "basecase = 'LSI_Flood_Basecase'     # checked against final pricing and is ok\n", "basecase2 = 'LSI_Flood_Basecase_2'  # checked against final pricing and is ok\n", "basecase3 = 'LSI_Flood_Basecase_3'  # checked against final pricing and is ok\n", "\n", "vSPDrun1 = 'LSI_Flood_MAN_ONLY_AVI_BEN_NSY_ROX'     # Added MAN => $0.01/MWh + AVI_BEN and NSY_CYD constraints\n", "vSPDrun2 = 'LSI_Flood_MAN_AVI_WTK_AVI_BEN_NSY_ROX'  # Added both AVI_BEN and NSY_CYD constraints with MAN, AVI, WTK at $0.01/MWh\n", "vSPDrun3 = 'LSI_Flood_vSPDrun3'\n", "vSPDrun4 = 'LSI_Flood_vSPDrun4'\n", "vSPDrun5 = 'LSI_Flood_vSPDrun5'\n", "vSPDrun6 = 'LSI_Flood_vSPDrun6'\n", "vSPDrun7 = 'LSI_Flood_vSPDrun7'\n", "\n", "# $0.01/MWh\n", "vSPDrun1_001MWh = 'LSI_Flood_vSPDrun1_001MWh'\n", "vSPDrun2_001MWh = 'LSI_Flood_vSPDrun2_001MWh'\n", "vSPDrun3_001MWh = 'LSI_Flood_vSPDrun3_001MWh'\n", "vSPDrun4_001MWh = 'LSI_Flood_vSPDrun4_001MWh'\n", "vSPDrun5_001MWh = 'LSI_Flood_vSPDrun5_001MWh'\n", "vSPDrun6_001MWh = 'LSI_Flood_vSPDrun6_001MWh'\n", "vSPDrun7_001MWh = 'LSI_Flood_vSPDrun7_001MWh'\n", "\n", "# $6.35/MWh\n", "vSPDrun1_6pt35 = 'LSI_Flood_MAN_ONLY_AVI_BEN_NSY_ROX'\n", "vSPDrun2_6pt35 = 'LSI_Flood_MAN_AVI_WTK_AVI_BEN_NSY_ROX'\n", "vSPDrun3_6pt35 = 'LSI_Flood_vSPDrun3_6pt35'\n", "vSPDrun4_6pt35 = 'LSI_Flood_vSPDrun4_6pt35'\n", "vSPDrun5_6pt35 = 'LSI_Flood_vSPDrun5_6pt35'\n", "vSPDrun6_6pt35 = 'LSI_Flood_vSPDrun6_6pt35'\n", "vSPDrun7_6pt35 = 'LSI_Flood_vSPDrun7_6pt35'\n", "\n", "# $10/MWh\n", "vSPDrun1_10MWh = 'LSI_Flood_vSPDrun1_10MWh'\n", "vSPDrun2_10MWh = 'LSI_Flood_vSPDrun2_10MWh'\n", "vSPDrun3_10MWh = 'LSI_Flood_vSPDrun3_10MWh'\n", "vSPDrun4_10MWh = 'LSI_Flood_vSPDrun4_10MWh'\n", "vSPDrun5_10MWh = 'LSI_Flood_vSPDrun5_10MWh'\n", "vSPDrun6_10MWh = 'LSI_Flood_vSPDrun6_10MWh'\n", "vSPDrun7_10MWh = 'LSI_Flood_vSPDrun7_10MWh'\n", "\n", "# $20/MWh\n", "vSPDrun1_20MWh = 'LSI_Flood_vSPDrun1_20MWh'\n", "vSPDrun2_20MWh = 'LSI_Flood_vSPDrun2_20MWh'\n", "vSPDrun3_20MWh = 'LSI_Flood_vSPDrun3_20MWh'\n", "vSPDrun4_20MWh = 'LSI_Flood_vSPDrun4_20MWh'\n", "vSPDrun5_20MWh = 'LSI_Flood_vSPDrun5_20MWh'\n", "vSPDrun6_20MWh = 'LSI_Flood_vSPDrun6_20MWh'\n", "vSPDrun7_20MWh = 'LSI_Flood_vSPDrun7_20MWh'\n",
"\n", "# $30/MWh\n", "vSPDrun1_30MWh = 'LSI_Flood_vSPDrun1_30MWh'\n", "vSPDrun2_30MWh = 'LSI_Flood_vSPDrun2_30MWh'\n", "vSPDrun3_30MWh = 'LSI_Flood_vSPDrun3_30MWh'\n", "vSPDrun4_30MWh = 'LSI_Flood_vSPDrun4_30MWh' \n", "vSPDrun5_30MWh = 'LSI_Flood_vSPDrun5_30MWh' \n", "vSPDrun6_30MWh = 'LSI_Flood_vSPDrun6_30MWh' \n", "vSPDrun7_30MWh = 'LSI_Flood_vSPDrun7_30MWh' \n", "\n", "\n", "# get Benmore Max MW\n", "BEN_Max = pd.read_csv(f'{input_folder}/BENMOREMAXMW.csv', parse_dates=True)\n", "BEN_Max.set_index('DateTime', inplace=True, drop=True)\n", "BEN_Max.index = pd.to_datetime(BEN_Max.index, dayfirst=True)\n", "BEN_Max.index = BEN_Max.index.map(lambda x: x+timedelta(seconds=60*15))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def vSPD_override_loader(path, case, files=files, start_date=date(2019,12,3), end_date=date(2019,12,18)):\n", " \"\"\"load vSPD output data\"\"\"\n", " output = {}\n", " delta = end_date - start_date # as timedelta\n", "\n", "\n", "\n", " for k, v in files.items():\n", " first_file = True\n", " for i in range(delta.days + 1):\n", " yyyymmdd = (start_date + timedelta(days=i)).strftime('%Y%m%d')\n", " filename = path + '\\\\or_' + case + yyyymmdd + '/or_' + case + yyyymmdd + '_' + v\n", " print('Loading: ' + filename)\n", " if first_file:\n", " df_temp = pd.read_csv(filename, index_col=[0, 1], parse_dates=True)\n", " first_file = False\n", " else:\n", " df_temp = df_temp.append(pd.read_csv(filename, index_col=[0, 1], parse_dates=True))\n", " \n", " output[k] = df_temp\n", " return output\n", " \n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# LSI flood event\n", "\n", "A large rainfall event resulting in spilling at a number of hydro stations in the South Island throughout November and December 2019. A UTS was alleged on the 12 December 2019. This notebook provides some of the analysis performed by the market monitoring team into this event.\n", "\n", "We first look at South Island hydro generation spill data provided by both Meridian and Contact. \n", "\n", "### vSPD simulations\n", "\n", "We started our UTS analysis by using vSPD to model potential alternative pricing scenarios had offers by the LSI generators been lower. We decided to only alter LSI generation offer prices (not quantities) and we only set offer prices if, at the same time, the hydro generator had recorded hydro spill. \n", "\n", "Our initial analysis did this for all generators, ignoring physical and resource constraint limits that the LSI generators must take into account. As described further below, we then attempted to take into account some of the resource and hydrological constraint limits faced by the LSI generators. \n", "\n", "Our first vSPD modelling was conducted by splitting the time period into seven sequential time periods (as illustrated below) with different combinations of hydro stations spilling. This made simulating each period relatively straight-forward by setting offer prices for the generators spilling during each period. 
\n", "\n", "The seven time periods are:\n", "\n", " - vSPD run1 (~6 days and ~13 days) 2019-11-09 -> 2019-12-02 MAN spilling\n", " - vSPD run2 (~4 days) 2019-11-15 -> 2019-11-18) MAN/CYD/ROX spilling\n", " - vSPD run3 (~5 days) 2019-12-03 -> 2019-12-07 MAN/CYD/ROX/BEN/AVI/WTK spilling\n", " - vSPD run4 (~20 days) 2019-12-08 -> 2019-12-27 MAN/OHA/OHB/OHC/BEN/AVI/WTK/ROX/CYD all spilling\n", " - vSPD run5 (~3 days) 2019-12-28 -> 2019-12-30 MAN/OHA/OHB/OHC/BEN/AVI/WTK all spilling\n", " - vSPD run6 (~6 days) 2019-12-31 -> 2020-01-05 MAN/BEN/AVI/WTK spilling\n", " - vSPD run7 (~10 days) 2020-01-06 -> 2020-01-16 MAN/AVI/WTK spilling\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Spill data\n", "MERI_spill = pd.read_excel(f\"{input_folder}/Data Request - Sam Fleming - 20200116.xlsx\", index_col=0,\n", " parse_dates=True)\n", "\n", "MERI_spill.index = MERI_spill.index.map(lambda x: x+timedelta(seconds=60*60)) # Day-light savings time-shift required!\n", "CTCT_spill = pd.read_excel(f\"{input_folder}/Contact Energy Flows Data-20200116.xlsx\", index_col=0, parse_dates=True)\n", "CTCT_spill = CTCT_spill[['TP', 'Spill For Highflow CYD (cumecs)','Spill For Highflow ROX(cumecs)']]\n", "CTCT_spill = CTCT_spill.reset_index()\n", "CTCT_spill = CTCT_spill.rename(columns={'Date \\n(Trading Period Ending)': 'Date', \"Spill For Highflow CYD (cumecs)\": \"Clyde Spillway\", \"Spill For Highflow ROX(cumecs)\": \"Roxburgh Spillway\"})\n", "CTCT_spill = CTCT_spill.set_index(['Date'])\n", "CTCT_spill.index = pd.to_datetime(CTCT_spill.index)\n", "del CTCT_spill['TP']\n", "spill2 = pd.concat([MERI_spill, CTCT_spill], axis=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "colours()\n", "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "\n", "MERI_spill.sort_index(axis=1).plot(ax=ax, lw=3, fontsize=14)\n", "(CTCT_spill/100).plot(ax=ax, lw=3)\n", "legend_format(ax, cols=6, ypos=-0.18)\n", "plot_formatting(ax)\n", "#UTS_period(ax)\n", "plot_shading(ax, y_offset=1150)\n", "ax.set_xlabel('')\n", "ax.set_ylabel('Hydro spill (cumecs)', fontsize=14)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "\n", "spill2.sort_index(level=1).plot(kind='area', ax=ax, fontsize=14)\n", "legend_format(ax, cols=5, fontsize=14, xpos=-0.04, ypos=-0.2)\n", "ax.set_ylabel('Combinded South Island hydro spill (cumecs)', fontsize=14)\n", "legend_format(ax, cols=6, ypos=-0.2)\n", "plot_formatting(ax)\n", "#UTS_period(ax)\n", "plot_shading(ax, y_offset=6200)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Basecase vSPD runs\n", "\n", "For each of these time periods we first performed a basecase vSPD run which outputted results that matched final pricing, i.e., what was settled in the wholesale spot market at the time. \n", "\n", "#### Low offer vSPD runs\n", "We then altered SI hydro generation offer prices at the generators that were spilling. 
We did this for several lower offer prices to investigate the effects; in particular, we looked at changing the offer prices of spilling generators to the following:\n", "\n", " - \\$0/MWh\n", " - \\$0.01/MWh (results illustrated below)\n", " - \\$6.35/MWh (results illustrated below)\n", " - \\$10/MWh (results illustrated below)\n", " - \\$20/MWh (results illustrated below)\n", " - \\$30/MWh (results illustrated below)\n", "\n", "#### Notes on transmission constraints\n", "\n", "When lowering offer prices, and potentially increasing generation output, we were careful to ensure that transmission constraints were included in the vSPD formulation. This is required because a change in generation offer behaviour will result in increased LSI generation, and in the potential for transmission constraints out of the Lower South Island to bind. These constraints were not necessary in the basecase final pricing runs.\n", "\n", "The System Operator, in the pricing schedules and in final pricing, uses a tool called the Simultaneous Feasibility Test (SFT). This is an AC power flow model of the current network configuration that automatically builds transmission constraints on-the-fly, to ensure the transmission network can still operate if a single element, like a single circuit or transformer, trips (called N-1). As we do not have this tool, we relied on historically built constraints, as well as contracting System Operator engineers to check and manually model these transmission constraints. A complicating factor during the flood event was the damage to, and consequent loss of, a section of the transmission line crossing the Rangitata River that feeds Christchurch and the Upper South Island (USI). This occurred on the evening of 7 December 2019.\n", "\n", "Two LSI transmission constraints had to be added manually to the vSPD runs to ensure the transmission system operated within its means. These were:\n", "\n", " - between Aviemore and Benmore on the Waitaki River; this constraint is particularly important to model following the damage and long-term outage on the Livingstone - Islington transmission line[1](#fn1).\n", " - over-loading of the Roxburgh - Naseby transmission line following a trip of either of the Clyde - Roxburgh transmission circuits[2](#fn2).\n", "\n", " [1] **AVI_BEN constraint (in vSPDsolve.gms)**\n", "\n", " The AVI_BEN transmission constraints added to vSPDsolve.gms were:\n", "\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'AVI_BEN1.1__AVI_BEN2.1__AVI_BEN2__AVI__LN','AVI_BEN1.1') = 1.216 ;\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'AVI_BEN1.1__AVI_BEN2.1__AVI_BEN2__AVI__LN','AVI_BEN2.1') = 0.921 ;\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'AVI_BEN1.1__AVI_BEN2.1__AVI_BEN2__AVI__LN','i_constraintLimit') = 250 ;\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'AVI_BEN1.1__AVI_BEN2.1__AVI_BEN2__AVI__LN','i_constraintSense') = -1 ;\n", "\n", " Read with a constraint sense of -1 (a less-than-or-equal constraint), this says: 1.216 x flow(AVI_BEN1.1) + 0.921 x flow(AVI_BEN2.1) <= 250 MW.\n", "\n", " [2] **NSY_ROX constraint (in vSPDsolve.gms)**\n", "\n", "This constraint is managed with a Special Protection Scheme (SPS) which, following detection of an over-load on the Roxburgh - Naseby circuit, splits the Roxburgh 220kV bus. Following this split, the effective impedance of the Roxburgh - Naseby circuit is increased, which reduces the power flow and relieves the constraint. One issue with this scheme during high LSI hydro generation is that it could be triggered inadvertently by steady-state flows, if flows reach close to the steady-state transmission limit on the Roxburgh - Naseby circuit. 
To ensure this does not occur, the System Operator added manual constraint equations to the vSPD model so that this transmission line did not inadvertently trip in steady state. Although this sounds like a large change, it is not: this constraint should always have existed as part of the SPS scheme. Possibly the trigger level for the operation of the scheme (the line flow on the Roxburgh - Naseby circuit) should have been increased to the temporary 15-minute off-load limits, which would have allowed slightly more transmission out of the LSI region. The NSY_ROX transmission constraints added to vSPDsolve.gms were:\n", "\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'NSY_ROX.1__CYD_TWZ1.1__:S__CYD_TWZ1__ROX__LN','NSY_ROX.1') = -1 ;\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'NSY_ROX.1__CYD_TWZ1.1__:S__CYD_TWZ1__ROX__LN','CYD_TWZ1.1') = -0.084 ;\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'NSY_ROX.1__CYD_TWZ2.1__:S__CYD_TWZ2__ROX__LN','NSY_ROX.1') = -1 ;\n", "    i_tradePeriodBranchConstraintFactors(i_TradePeriod,'NSY_ROX.1__CYD_TWZ2.1__:S__CYD_TWZ2__ROX__LN','CYD_TWZ2.1') = -0.084 ;\n", "\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'NSY_ROX.1__CYD_TWZ1.1__:S__CYD_TWZ1__ROX__LN','i_constraintLimit') = 220 ;\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'NSY_ROX.1__CYD_TWZ1.1__:S__CYD_TWZ1__ROX__LN','i_constraintSense') = -1 ;\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'NSY_ROX.1__CYD_TWZ2.1__:S__CYD_TWZ2__ROX__LN','i_constraintLimit') = 220 ;\n", "    i_tradePeriodBranchConstraintRHS(i_TradePeriod,'NSY_ROX.1__CYD_TWZ2.1__:S__CYD_TWZ2__ROX__LN','i_constraintSense') = -1 ;\n", "\n", " Each of these reads roughly as: -flow(NSY_ROX.1) - 0.084 x flow(CYD_TWZx.1) <= 220 MW, i.e., flow in the reverse of the NSY_ROX.1 reference direction, plus a small Clyde - Twizel term, is capped at 220 MW.\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Load vSPD results using parquet files - this is FAST!\n", "# moved to top of cell 3\n", "\n", "bc = vSPD_loader(path, basecase, csv=False)\n", "bc2 = vSPD_loader(path, basecase2, csv=False)\n", "bc3 = vSPD_loader(path, basecase3, csv=False)\n", "\n", "# Load $0.01/MWh vSPD run\n", "run1_001MWh = vSPD_loader(path, vSPDrun1_001MWh, csv=False)\n", "run2_001MWh = vSPD_loader(path, vSPDrun2_001MWh, csv=False)\n", "run3_001MWh = vSPD_loader(path, vSPDrun3_001MWh, csv=False)\n", "run4_001MWh = vSPD_loader(path, vSPDrun4_001MWh, csv=False)\n", "run5_001MWh = vSPD_loader(path, vSPDrun5_001MWh, csv=False)\n", "run6_001MWh = vSPD_loader(path, vSPDrun6_001MWh, csv=False)\n", "run7_001MWh = vSPD_loader(path, vSPDrun7_001MWh, csv=False)\n", "\n", "# Load $6.35/MWh vSPD run\n", "run1_6pt35 = vSPD_loader(path, vSPDrun1_6pt35, csv=False)\n", "run2_6pt35 = vSPD_loader(path, vSPDrun2_6pt35, csv=False)\n", "run3_6pt35 = vSPD_loader(path, vSPDrun3_6pt35, csv=False)\n", "run4_6pt35 = vSPD_loader(path, vSPDrun4_6pt35, csv=False)\n", "run5_6pt35 = vSPD_loader(path, vSPDrun5_6pt35, csv=False)\n", "run6_6pt35 = vSPD_loader(path, vSPDrun6_6pt35, csv=False)\n", "run7_6pt35 = vSPD_loader(path, vSPDrun7_6pt35, csv=False)\n", "\n", "# Load $10/MWh vSPD run\n", "run1_10MWh = vSPD_loader(path, vSPDrun1_10MWh, csv=False)\n", "run2_10MWh = vSPD_loader(path, vSPDrun2_10MWh, csv=False)\n", "run3_10MWh = vSPD_loader(path, vSPDrun3_10MWh, csv=False)\n", "run4_10MWh = vSPD_loader(path, vSPDrun4_10MWh, csv=False)\n", "run5_10MWh = vSPD_loader(path, vSPDrun5_10MWh, csv=False)\n", "run6_10MWh = vSPD_loader(path, vSPDrun6_10MWh, csv=False)\n", "run7_10MWh = vSPD_loader(path, vSPDrun7_10MWh, csv=False)\n", "\n", "# Load $20/MWh vSPD run\n", "run1_20MWh = vSPD_loader(path, vSPDrun1_20MWh, csv=False)\n",
"run2_20MWh = vSPD_loader(path, vSPDrun2_20MWh, csv=False)\n", "run3_20MWh = vSPD_loader(path, vSPDrun3_20MWh, csv=False)\n", "run4_20MWh = vSPD_loader(path, vSPDrun4_20MWh, csv=False)\n", "run5_20MWh = vSPD_loader(path, vSPDrun5_20MWh, csv=False)\n", "run6_20MWh = vSPD_loader(path, vSPDrun6_20MWh, csv=False)\n", "run7_20MWh = vSPD_loader(path, vSPDrun7_20MWh, csv=False)\n", "\n", "# Load $30/MWh vSPD run\n", "run1_30MWh = vSPD_loader(path, vSPDrun1_30MWh, csv=False)\n", "run2_30MWh = vSPD_loader(path, vSPDrun2_30MWh, csv=False)\n", "run3_30MWh = vSPD_loader(path, vSPDrun3_30MWh, csv=False)\n", "run4_30MWh = vSPD_loader(path, vSPDrun4_30MWh, csv=False)\n", "run5_30MWh = vSPD_loader(path, vSPDrun5_30MWh, csv=False)\n", "run6_30MWh = vSPD_loader(path, vSPDrun6_30MWh, csv=False)\n", "run7_30MWh = vSPD_loader(path, vSPDrun7_30MWh, csv=False)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Piece together the three base caser runs - do this separately (three runs as UTS analysis was on-going at the time).\n", "BC={}\n", "for k in files.keys():\n", " print(k)\n", " BC[k] = bc[k].append(bc2[k]).append(bc3[k])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Piece together the 7 time periods for each of the different simulations\n", "ALL_RUNS_001MWh = piece_together_vSPD_runs(run1_001MWh, run2_001MWh, run3_001MWh, run4_001MWh,\n", " run5_001MWh, run6_001MWh, run7_001MWh, files)\n", "ALL_RUNS_6pt35 = piece_together_vSPD_runs(run1_6pt35, run2_6pt35, run3_6pt35, run4_6pt35,\n", " run5_6pt35, run6_6pt35, run7_6pt35, files)\n", "ALL_RUNS_10MWh = piece_together_vSPD_runs(run1_10MWh, run2_10MWh, run3_10MWh, run4_10MWh,\n", " run5_10MWh, run6_10MWh, run7_10MWh, files)\n", "ALL_RUNS_20MWh = piece_together_vSPD_runs(run1_20MWh, run2_20MWh, run3_20MWh, run4_20MWh,\n", " run5_20MWh, run6_20MWh, run7_20MWh, files)\n", "ALL_RUNS_30MWh = piece_together_vSPD_runs(run1_30MWh, run2_30MWh, run3_30MWh, run4_30MWh,\n", " run5_30MWh, run6_30MWh, run7_30MWh, files)\n", "vSPDpath = input_folder\n", "RUN1_35 = vSPD_override_loader(vSPDpath, 'UTS_35_CLU_MAN_', files=files, start_date=date(2019,12,3), end_date=date(2019,12,7))\n", "RUN2_35 = vSPD_override_loader(vSPDpath, 'UTS_35_CLU_MAN_WTR_', files=files, start_date=date(2019,12,8), end_date=date(2019,12,18))\n", "ALL_RUNS_35MWh={}\n", "for k in files.keys():\n", " # append timeseries runs together\n", " ALL_RUNS_35MWh[k] = RUN1_35[k].append(RUN2_35[k])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# reduce date range to UTS period\n", "idx = pd.IndexSlice\n", "#df.loc[idx['2019-12-03':'2019-12-18', :], :]\n", "\n", "for k in files.keys():\n", " print(k)\n", " BC[k] = BC[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_001MWh[k] = ALL_RUNS_001MWh[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_6pt35[k] = ALL_RUNS_6pt35[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_10MWh[k] = ALL_RUNS_10MWh[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_20MWh[k] = ALL_RUNS_20MWh[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_30MWh[k] = ALL_RUNS_30MWh[k].loc[idx[UTS_start_date:UTS_end_date, :], :]\n", " ALL_RUNS_35MWh[k] = ALL_RUNS_35MWh[k].loc[idx[UTS_start_date:UTS_end_date, :], :]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Output system load cost files to excel for participants to explore\n", "# Daily 
"# Daily System Load cost from the vSPD result files\n", "runs = {'vSPD basecase': BC,\n", "        'vSPD LSI offers@$0.01/MWh': ALL_RUNS_001MWh,\n", "        'vSPD LSI offers@$6.35/MWh': ALL_RUNS_6pt35,\n", "        'vSPD LSI offers@$10/MWh': ALL_RUNS_10MWh,\n", "        'vSPD LSI offers@$20/MWh': ALL_RUNS_20MWh,\n", "        'vSPD LSI offers@$30/MWh': ALL_RUNS_30MWh,\n", "        'vSPD LSI offers@$35/MWh': ALL_RUNS_35MWh}\n", "vSPD_system_load_cost = pd.DataFrame(\n", "    {name: d['sys_res'].sort_index().reset_index(level=1, drop=True)['SystemLoadCost ($)']/1e6\n", "     for name, d in runs.items()})\n", "\n", "# excel files are not written by default - switch the flag to True if desired\n", "outputFolder = r''\n", "write_excel_files = False\n", "if write_excel_files:\n", "    writer = pd.ExcelWriter(f'{outputFolder}/vSPD_UTS_daily_system_load_cost_results.xlsx')\n", "    vSPD_system_load_cost.to_excel(writer, sheet_name='Daily System Load Cost ($m)')\n", "    writer.save()\n", "    # basecase load data - changes a little between vSPD runs (due to IL, we think), but is mostly the same\n", "    vSPD_load = BC['nod_res']['Load (MW)'].unstack()\n", "    writer = pd.ExcelWriter(f'{outputFolder}/vSPD_UTS_basecase_load.xlsx')\n", "    vSPD_load.to_excel(writer, sheet_name='vSPD basecase Load (MW)')\n", "    writer.save()\n", "    # basecase wholesale spot price simulation results\n", "    vSPD_price = BC['nod_res']['Price ($/MWh)'].unstack()\n", "    writer = pd.ExcelWriter(f'{outputFolder}/vSPD_UTS_basecase_spotprice.xlsx')\n", "    vSPD_price.to_excel(writer, sheet_name='vSPD basecase spot price (dollars per MWh)')\n", "    writer.save()\n", "    # wholesale spot prices for each low-offer scenario\n", "    price_runs = {'001MWh': ALL_RUNS_001MWh, '6pt35': ALL_RUNS_6pt35, '10MWh': ALL_RUNS_10MWh,\n", "                  '20MWh': ALL_RUNS_20MWh, '30MWh': ALL_RUNS_30MWh, '35MWh': ALL_RUNS_35MWh}\n", "    for key, d in price_runs.items():\n", "        vSPD_price_run = d['nod_res']['Price ($/MWh)'].unstack()\n", "        writer = pd.ExcelWriter(f'{outputFolder}/vSPD_UTS_LSI_offers_{key}_spotprice.xlsx')\n", "        vSPD_price_run.to_excel(writer, sheet_name='vSPD spot price (dollars per MWh)')\n", "        writer.save()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Initial vSPD results\n", "### Estimating additional hydro spill - ignoring physical hydrological constraints\n", "\n", "The best method of comparing the different vSPD runs is to look at the change in dispatched generation. We can do this easily by plotting the change in HVDC flow between the basecase vSPD run and the low-offer LSI generation runs, noting that the HVDC change will be slightly smaller than the generation change because of electrical network losses.\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Return HVDC flow for the different vSPD simulations\n", "HVDC_BC = get_hvdc_flow(BC)\n", "HVDC_001MWh = get_hvdc_flow(ALL_RUNS_001MWh)\n", "HVDC_6pt35 = get_hvdc_flow(ALL_RUNS_6pt35)\n", "HVDC_10MWh = get_hvdc_flow(ALL_RUNS_10MWh)\n", "HVDC_20MWh = get_hvdc_flow(ALL_RUNS_20MWh)\n", "HVDC_30MWh = get_hvdc_flow(ALL_RUNS_30MWh)\n", "HVDC_35MWh = get_hvdc_flow(ALL_RUNS_35MWh)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "HVDC = pd.DataFrame({'Basecase (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_BC.mean(), HVDC_BC.sum()/2000.0): HVDC_BC,\n", "                     'LSI offers@$0.01/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_001MWh.mean(), HVDC_001MWh.sum()/2000.0): HVDC_001MWh,\n", "                     'LSI offers@$6.35/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_6pt35.mean(), HVDC_6pt35.sum()/2000.0): HVDC_6pt35,\n", "                     'LSI offers@$10/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_10MWh.mean(), HVDC_10MWh.sum()/2000.0): HVDC_10MWh,\n", "                     'LSI offers@$20/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_20MWh.mean(), HVDC_20MWh.sum()/2000.0): HVDC_20MWh,\n", "                     'LSI offers@$30/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_30MWh.mean(), HVDC_30MWh.sum()/2000.0): HVDC_30MWh,\n", "                     'LSI offers@$35/MWh (mean HVDC flow north: %iMW/%iGWh)' % (HVDC_35MWh.mean(), HVDC_35MWh.sum()/2000.0): HVDC_35MWh\n", "                     })" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16, 12])\n", "ax = fig.add_subplot(111)\n", "HVDC.plot(ax=ax, fontsize=16)\n", "plot_formatting(ax)\n", "legend_format(ax, cols=2, ypos=-0.25)\n", "ax.set_xlabel('')\n", "ax.set_ylabel('MW', fontsize=16)\n", "ax.set_title('HVDC flows for Basecase and vSPD runs (December 2019)', fontsize=20)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "On first appearance the difference is relatively small. There are various reasons for this, but we think one of the main ones is low offer prices in the North Island. For example, an offer from a North Island generator, say at \\$0.01/MWh, is likely to be dispatched ahead of the same offer in the South Island. This is because the wholesale spot market in New Zealand takes electrical losses into account, which favours generation closer to load over generation further away.\n", "\n", "However, the differences in HVDC flow add up over time, as illustrated below."
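, "\n", "\n", "As a stylised illustration of the loss effect (our own numbers, not model output): if marginal HVDC losses are of the order of 3-5%, each extra MW sent north from the LSI delivers only roughly 0.95-0.97 MW at Haywards, so a \\$0.01/MWh South Island offer effectively competes at a small premium against an identical offer located near North Island load."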
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "HVDC_diff_001MWh = (HVDC_001MWh-HVDC_BC).fillna(method='ffill')\n", "HVDC_diff_6pt35 = (HVDC_6pt35-HVDC_BC).fillna(method='ffill')\n", "HVDC_diff_10MWh = (HVDC_10MWh-HVDC_BC).fillna(method='ffill')\n", "HVDC_diff_20MWh = (HVDC_20MWh-HVDC_BC).fillna(method='ffill')\n", "HVDC_diff_30MWh = (HVDC_30MWh-HVDC_BC).fillna(method='ffill')\n", "HVDC_diff_35MWh = (HVDC_35MWh-HVDC_BC).fillna(method='ffill')\n", "\n", "\n", "HVDC_diff_001MWh_cs = HVDC_diff_001MWh.cumsum()/2e3\n", "HVDC_diff_6pt35_cs = HVDC_diff_6pt35.cumsum()/2e3\n", "HVDC_diff_10MWh_cs = HVDC_diff_10MWh.cumsum()/2e3\n", "HVDC_diff_20MWh_cs = HVDC_diff_20MWh.cumsum()/2e3\n", "HVDC_diff_30MWh_cs = HVDC_diff_30MWh.cumsum()/2e3\n", "HVDC_diff_35MWh_cs = HVDC_diff_35MWh.cumsum()/2e3\n", "\n", "\n", "df = pd.DataFrame({'Estimated reduced spill (LSI offers @ $0.01/MWh)': HVDC_diff_001MWh_cs,\n", " 'Estimated reduced spill (LSI offers @ $6.35/MWh)': HVDC_diff_6pt35_cs,\n", " 'Estimated reduced spill (LSI offers @ $10/MWh)': HVDC_diff_10MWh_cs,\n", " 'Estimated reduced spill (LSI offers @ $20/MWh)': HVDC_diff_20MWh_cs,\n", " 'Estimated reduced spill (LSI offers @ $30/MWh)': HVDC_diff_30MWh_cs,\n", " 'Estimated reduced spill (LSI offers @ $35/MWh)': HVDC_diff_35MWh_cs\n", " })\n", "\n", "fig = plt.figure(1, figsize=[16, 12])\n", "ax = fig.add_subplot(111)\n", "df[\"2019/12\"].plot(lw=5, ax=ax, fontsize=16)\n", "ax.set_xlabel('')\n", "ax.set_ylabel('Estimated reduced spill (GWh)', fontsize=18)\n", "ax.set_ylim([0, 85])\n", "plot_formatting(ax)\n", "legend_format(ax, fontsize=14, xpos=0.04, ypos=-.24, cols=2)\n", "textrange = UTS_end_date + timedelta(hours = 12) \n", "ax.text(textrange, HVDC_diff_001MWh_cs[-1], str(int(10*HVDC_diff_001MWh_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_001MWh_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.text(textrange, HVDC_diff_6pt35_cs[-1], str(int(10*HVDC_diff_6pt35_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_6pt35_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.text(textrange, HVDC_diff_10MWh_cs[-1], str(int(10*HVDC_diff_10MWh_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_10MWh_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.text(textrange, HVDC_diff_20MWh_cs[-1], str(int(10*HVDC_diff_20MWh_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_20MWh_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.text(textrange, HVDC_diff_30MWh_cs[-1], str(int(10*HVDC_diff_30MWh_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_30MWh_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.text(textrange, HVDC_diff_35MWh_cs[-1], str(int(10*HVDC_diff_35MWh_cs[-1]*1000/24/UTS_days)/10) + 'MW/' + str(int(10*HVDC_diff_35MWh_cs[-1])/10) + 'GWh', fontsize=12)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# return difference in total system costs\n", "sc_001 = calc_diff_sys_cost(BC, ALL_RUNS_001MWh)/1e6\n", "sc_635 = calc_diff_sys_cost(BC, ALL_RUNS_6pt35)/1e6\n", "sc_10 = calc_diff_sys_cost(BC, ALL_RUNS_10MWh)/1e6\n", "sc_20 = calc_diff_sys_cost(BC, ALL_RUNS_20MWh)/1e6\n", "sc_30 = calc_diff_sys_cost(BC, ALL_RUNS_30MWh)/1e6\n", "sc_35 = calc_diff_sys_cost(BC, ALL_RUNS_35MWh)/1e6\n", "\n", "\n", "sys_cost = pd.DataFrame({'$0.01/MWh': sc_001,\n", " '$6.35/MWh': sc_635,\n", " '$10/MWh': sc_10,\n", " '$20/MWh': sc_20,\n", " '$30/MWh': sc_30,\n", " '$35/MWh': sc_35\n", " })\n" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "sys_cost.cumsum().interpolate().plot(ax=ax, lw=4, fontsize=16)\n", "plot_shading(ax, y_offset=90)\n", "plot_formatting(ax)\n", "legend_format(ax, xpos=0.04, cols=5)\n", "ax.set_xlabel('')\n", "ax.set_ylabel('$m', fontsize=20)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "sys_cost[UTS_start: UTS_end].cumsum().plot(ax=ax, lw=3, fontsize=16)\n", "plot_formatting(ax)\n", "legend_format(ax, cols=5)\n", "ax.set_ylabel('$m', fontsize=20)\n", "ax.set_xlabel('', fontsize=20)\n", "ax.set_title('Total system Cost savings for different LSI offer prices - ignoring hydrological constraints', fontsize=16)\n", "ax.text(UTS_next_date, sc_001[UTS_start: UTS_end].cumsum()[-1], sc_001.cumsum()[-1], fontsize=12)\n", "ax.text(UTS_next_date, sc_635[UTS_start: UTS_end].cumsum()[-1], sc_635[UTS_start: UTS_end].cumsum()[-1], fontsize=12)\n", "ax.text(UTS_next_date, sc_10[UTS_start: UTS_end].cumsum()[-1], sc_10[UTS_start: UTS_end].cumsum()[-1], fontsize=12)\n", "ax.text(UTS_next_date, sc_20[UTS_start: UTS_end].cumsum()[-1], sc_20[UTS_start: UTS_end].cumsum()[-1], fontsize=12)\n", "ax.text(UTS_next_date, sc_30[UTS_start: UTS_end].cumsum()[-1], sc_30[UTS_start: UTS_end].cumsum()[-1], fontsize=12)\n", "ax.text(UTS_next_date, sc_35[UTS_start: UTS_end].cumsum()[-1], sc_35[UTS_start: UTS_end].cumsum()[-1], fontsize=12)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16,20])\n", "ax = fig.add_subplot(211)\n", "ax2 = fig.add_subplot(212)\n", "#MAN\n", "BC['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].plot(ax=ax2, lw=1, alpha=0.1, fontsize=14, color=c_p['br1'], label='')\n", "BC['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].rolling(48).mean().plot(ax=ax2, lw=3, fontsize=14, label='Manapouri rolling daily mean price')\n", "ALL_RUNS_6pt35['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].plot(ax=ax2, lw=1, alpha=0.5, fontsize=14, color=c_p['bl2'], label='')\n", "ALL_RUNS_6pt35['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].rolling(48).mean().plot(ax=ax2, lw=3, fontsize=14, color=c_p['bl1'], label='Manapouri rolling daily mean price (low offers)' )\n", "#OTA\n", "BC['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].plot(ax=ax, lw=1, alpha=0.1, fontsize=14, color=c_p['br1'], label='')\n", "BC['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].rolling(48).mean().plot(ax=ax, lw=3, fontsize=14, label='Otahuhu rolling daily mean price')\n", "ALL_RUNS_6pt35['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].plot(ax=ax, lw=1, alpha=0.5, fontsize=14, color=c_p['bl2'], label='')\n", "ALL_RUNS_6pt35['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].rolling(48).mean().plot(ax=ax, lw=3, fontsize=14, color=c_p['bl1'], label='Otahuhu rolling daily mean price (low offers)' )\n", "\n", "ax.set_ylabel('$/MWh', fontsize=16)\n", "ax.set_xlabel('')\n", "ax2.set_xlabel('')\n", "ax2.set_ylabel('$/MWh', fontsize=16)\n", "\n", "ax.set_ylim([0, 400])\n", "ax2.set_ylim([0, 400])\n", "plot_formatting(ax)\n", "plot_formatting(ax2)\n", "plot_shading(ax, y_offset=390)\n", "plot_shading(ax2, y_offset=390)\n", "legend_format(ax, cols=2, ypos=-0.21, xpos=0.01, fontsize=14)\n", "legend_format(ax2, cols=2, ypos=-0.21, xpos=0.01, fontsize=14)\n", 
"\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16,20])\n", "ax = fig.add_subplot(211)\n", "ax2 = fig.add_subplot(212)\n", "#MAN\n", "BC['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].plot(ax=ax2, lw=1, alpha=0.1, fontsize=14, color=c_p['br1'], label='')\n", "BC['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].rolling(48).mean().plot(ax=ax2, lw=3, fontsize=14, label='Manapouri rolling daily mean price')\n", "ALL_RUNS_35MWh['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].plot(ax=ax2, lw=1, alpha=0.5, fontsize=14, color=c_p['bl2'], label='')\n", "ALL_RUNS_35MWh['nod_res']['Price ($/MWh)'].unstack()['MAN2201 MAN0'].rolling(48).mean().plot(ax=ax2, lw=3, fontsize=14, color=c_p['bl1'], label='Manapouri rolling daily mean price ($46 offers)' )\n", "#OTA\n", "BC['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].plot(ax=ax, lw=1, alpha=0.1, fontsize=14, color=c_p['br1'], label='')\n", "BC['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].rolling(48).mean().plot(ax=ax, lw=3, fontsize=14, label='Otahuhu rolling daily mean price')\n", "ALL_RUNS_35MWh['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].plot(ax=ax, lw=1, alpha=0.5, fontsize=14, color=c_p['bl2'], label='')\n", "ALL_RUNS_35MWh['nod_res']['Price ($/MWh)'].unstack()['OTA2201'].rolling(48).mean().plot(ax=ax, lw=3, fontsize=14, color=c_p['bl1'], label='Otahuhu rolling daily mean price ($46 offers)' )\n", "\n", "ax.set_ylabel('$/MWh', fontsize=16)\n", "ax.set_xlabel('')\n", "ax2.set_xlabel('')\n", "ax2.set_ylabel('$/MWh', fontsize=16)\n", "\n", "ax.set_ylim([0, 400])\n", "ax2.set_ylim([0, 400])\n", "plot_formatting(ax)\n", "plot_formatting(ax2)\n", "plot_shading(ax, y_offset=390)\n", "plot_shading(ax2, y_offset=390)\n", "legend_format(ax, cols=2, ypos=-0.21, xpos=0.01, fontsize=14)\n", "legend_format(ax2, cols=2, ypos=-0.21, xpos=0.01, fontsize=14)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Estimating additional hydro spill - including physical hydrological constraints\n", "\n", "One problem with the vSPD simulations above is the lack of modelling of physical constraints outside of the vSPD market model. In this instance these constraints are environmental and hydrological in nature and are not part of the vSPD or SPD formulation. For example, a lot of the spill at Manapouri is due to the environmental resource consent conditions as detailed in our main report. A second issue we discovered after discussions with Meridian is the 'no-go' zone on the Benmore spillway. \n", "\n", "The details of the specific steps we undertook to estimate the excess spill, in particular related to the Benmore spillway issue, are as follows:\n", "\n", " a) All South Island generators except Benmore were modelled to generate the same as they did during the flood event – we use reconciliation (RM) data for this.\n", " \n", " b) Using vSPD we determined total South Island generation dispatched had all offers for spilling hydro stations been set to $0.01/MWh. This ensures all market constraints, including additional HVDC flow are satisfied, but ignores any spillway constraints at Benmore.\n", " \n", " c) The actual generation (RM data excluding Benmore) was subtracted from the total dispatch that the $0.01/MWh offer vSPD simulation predicts. This gave us a total potential Benmore generation series which is truncated at Benmore’s generation capacity. 
This is the new potential Benmore generation ignoring the spillway constraint.\n", " \n", " d) The Benmore RM data (what was actually generated) was then subtracted from this new potential Benmore generation to get the additional potential Benmore generation.\n", " \n", " e) This was then converted to cumecs and subtracted from the spill data which is also in cumecs. \n", " \n", " f) If this resulted in spill within the no-go zone then the generation at Benmore in the trading period was discarded. \n", " \n", " g) For those trading periods that result in an increase in generation, this generation was converted to MW, summed, and converted to GWh. \n", "\n", "The calculations for this analysis are below. We first take a look at the Benmore spill and Benmore generation hydro flows. Given the spillway constraints, we determine trading periods where, ignoring all other market constraints, we think Benmore could have physically generated more, for the same generator station through flow, satisfying the spillway constraints. \n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Get RM generation data during December for LSI generators\n", "\n", "RMG_LSI = pd.read_parquet(f\"{input_folder}/201912_RMG_LSI.parquet\")\n", "RMG_BEN = RMG_LSI[\"BEN2202\"]\n", "RMG_MERI = RMG_LSI.loc[:, ['OHA2201', 'OHB2201', 'OHC2201', 'BEN2202', 'AVI2201', 'WTK0111', 'MAN2201']]\n", "RMG_MERI_WTK = RMG_MERI.loc[:, ['OHA2201', 'OHB2201', 'OHC2201', 'BEN2202', 'AVI2201', 'WTK0111']]\n", "\n", "# Benmore spill data\n", "BEN_SPILL = MERI_spill['Benmore Spillway'][\"2019/12\"]\n", "BEN_SPILL.index = BEN_SPILL.index.map(lambda x: x+timedelta(seconds=15*60))\n", "# MAX_BEN_GEN_FLOW = 650\n", "# CUMECS_to_MW = RMG_BEN.max()/MAX_BEN_GEN_FLOW\n", "CUMECS_to_MW = 1/1.223 # https://www.emi.ea.govt.nz/Environment/Datasets/HydrologicalModellingDataset/1_InfrastructureAndHydroConstraintAttributes/20180726_InfrastructureAndHydroConstraintAttributes.csv\n", "\n", "\n", "#BEN_g_CUMECS = RMG_BEN*(MAX_BEN_GEN_FLOW/RMG_BEN.max())\n", "BEN_g_CUMECS = RMG_BEN/CUMECS_to_MW\n", "BEN_CUMECS = pd.DataFrame({'Spillway': BEN_SPILL,\n", " 'Generation': BEN_g_CUMECS, \n", " 'Total': BEN_SPILL + BEN_g_CUMECS})\n", "\n", "BEN_CUMECS = pd.concat([BEN_CUMECS, BEN_Max], axis=1, sort=True)\n", "BEN_CUMECS = BEN_CUMECS.loc[idx[UTS_start_date:UTS_end_date], :]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "RMG_LSI.plot(kind='area', ax=ax, fontsize=16)\n", "plot_formatting(ax)\n", "legend_format(ax, cols=10)\n", "ax.set_title('LSI generation (December 2019)', fontsize=20)\n", "ax.set_ylabel('MW', fontsize=20)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "fig = plt.figure(1, figsize=[16,12])\n", "ax = fig.add_subplot(111)\n", "BEN_CUMECS.plot(ax=ax, fontsize=16, lw=2)\n", "\n", "ax.axhspan(340, 760, color = c_p['rd2'], alpha=0.2)\n", "#ax.axhline(MAX_BEN_GEN_FLOW, color=c_p['rd1'])\n", "#ax.axhline(MAX_BEN_GEN_FLOW+340, color=c_p['rd1'])\n", "#ax.axhline(MAX_BEN_GEN_FLOW+760, color=c_p['rd1'])\n", "\n", "\n", "ax.set_ylabel('CUMECS ($m^3$/s)', fontsize=16)\n", "ax.set_xlabel('', fontsize=16)\n", "legend_format(ax, fontsize=16)\n", "plot_formatting(ax)\n", "ax.text(datetime(2019,11,29, 12), 770, 'Spillway no go zone', color=c_p['rd1'],\n", " fontsize=14, alpha=0.4)\n", 
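"# NB (added comment): these annotation x-positions (29 Nov) sit before the UTS_start_date\n", "# xlim applied at the end of this cell, so the labels may be clipped from the visible plot.\n",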
"ax.text(datetime(2019,11,29, 12), 660, 'Max Generation', color=c_p['rd1'],\n", " fontsize=14, alpha=0.9)\n", "ax.set_title('Benmore Hydraulics', fontsize=20)\n", "ax.set_xlim([UTS_start_date, UTS_next_date])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Need sort logic to determine periods of interest...\n", "\n", "Here, we assume that the total spill that occurred is what is allowed. This total must be made up from generation flow and spill flow that satisfy Benmores spillway constraint, i.e., generation flow<660cumecs + spill flow<340 or spill flow>760\n", "\n", "Split into several constraints: \n", "\n", " - periods when spilling\n", " - periods when total spill = <660+340 (1000) = most periods...\n", " - total when total spill-760 <660 = all periods...\n", "\n", "Then during these periods we calculate what could have been generated and the resulting spill that obeys the physical constraints of the Benmore spillway...\n", "\n", "I.e.,\n", "\n", " - New Benmore generation = old_generation + spillway (under 660 cumecs).\n", " - New spill = old total - new generation\n", " \n", "We discard trading periods that don't match this, although we note that lake Benmore storage could have allowed some flexibility and that in one may argue that this flexibility in lake level control could have allowed less spill to occur. In this sense, we think this is a favourable assumption for Meridian. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# BEN booleans\n", "#b1 = BEN_CUMECS.Spillway>0\n", "#b2 = BEN_CUMECS.Total<1000\n", "#b3 = BEN_CUMECS.Total-760 < 660\n", "\n", "#boolean = (b1&b2&b3)\n", "\n", "\n", "#new BEN booleans\n", "b1 = BEN_CUMECS.Spillway>0\n", "b2 = BEN_CUMECS.Total<(BEN_CUMECS.BenmoreMAXGenCumecs + 340)\n", "b3 = BEN_CUMECS.Total-760