diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml index 12dbdc784..e5c03aeb2 100644 --- a/.github/workflows/deploy_docs.yml +++ b/.github/workflows/deploy_docs.yml @@ -27,6 +27,7 @@ jobs: pip install -e . pip install jupyter-book>=0.11.3 pip install sphinxcontrib-bibtex>=2.0.0 + python -m ipykernel install --user --name=ogcore-dev cd docs jb build ./book @@ -35,4 +36,4 @@ jobs: with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} BRANCH: gh-pages # The branch the action should deploy to. - FOLDER: docs/book/_build/html # The folder the action should deploy. \ No newline at end of file + FOLDER: docs/book/_build/html # The folder the action should deploy. diff --git a/.github/workflows/docs_check.yml b/.github/workflows/docs_check.yml index 295f19b46..b786ffb98 100644 --- a/.github/workflows/docs_check.yml +++ b/.github/workflows/docs_check.yml @@ -24,5 +24,6 @@ jobs: pip install -e . pip install jupyter-book>=0.11.3 pip install sphinxcontrib-bibtex>=2.0.0 + python -m ipykernel install --user --name=ogcore-dev cd docs jb build ./book diff --git a/.gitignore b/.gitignore index d2471dff7..77104e773 100644 --- a/.gitignore +++ b/.gitignore @@ -53,4 +53,5 @@ ogcore/tests/OUTPUT_BASELINE/* ogcore/tests/OUTPUT_REFORM/* regression/OUTPUT_BASELINE/* regression/OUTPUT_REFORM* +.vscode/ *default.profraw diff --git a/docs/book/_config.yml b/docs/book/_config.yml index bda8f9386..dd200c9d4 100644 --- a/docs/book/_config.yml +++ b/docs/book/_config.yml @@ -8,7 +8,7 @@ logo : '..//OG-Core_logo.png' #################################################### # Execution settings execute: - execute_notebooks : cache + execute_notebooks : force #################################################### # HTML-specific settings @@ -41,7 +41,7 @@ launch_buttons: # Information about where the book exists on the web repository: url : https://github.com/PSLmodels/OG-Core - path_to_book : 'book' + path_to_book : 'docs/book' 
####################################################################################### # Advanced and power-user settings diff --git a/docs/book/_toc.yml b/docs/book/_toc.yml index f501558ce..90817bac1 100644 --- a/docs/book/_toc.yml +++ b/docs/book/_toc.yml @@ -19,6 +19,7 @@ parts: - file: content/theory/market_clearing - file: content/theory/stationarization - file: content/theory/equilibrium + - file: content/theory/calibration - caption: Appendix chapters: - file: content/theory/derivations diff --git a/docs/book/content/theory/calibration.md b/docs/book/content/theory/calibration.md new file mode 100644 index 000000000..7e3addc77 --- /dev/null +++ b/docs/book/content/theory/calibration.md @@ -0,0 +1,135 @@ +(Chap_Calib)= +# Calibrating OG-Core + +The `OG-Core` model represents all the general model solution code for any overlapping generations model of a country or region. Although `OG-Core` has a `default_parameters.json` file that allows it to run independently, the preferred method for using `OG-Core` is as a dependency to a country calibration repository. We recommend that another repository is made, such as [`OG-USA`](https://github.com/PSLmodels/OG-USA) or [`OG-ZAF`](https://github.com/EAPD-DRB/OG-ZAF/) that uses `OG-Core` as its main computational foundation and engine and calibrates country-specific variables and functions in its own respective source code. This approach results in a working overlapping generations model consisting of a country-specific calibration repository plus a dependency on the general `OG-Core` model logic and options. + +{numref}`TabCountryModels` is a list of country-specific calibrations of overlapping generations models that use `OG-Core` as a dependency from oldest to newest. Note that these models are in varying stages of completeness and maturity. It is true that a model is never really fully calibrated. The model maintainer is always updating calibrated values as new data become available. 
And the model maintainer can always search for better fit and better targeting strategies. As such, the only measures of model maturity of the country calibrations below is the date the repository was created. + +```{list-table} **Country-specific calibrated OG models based on OG-Core.** +:header-rows: 1 +:name: TabCountryModels +* - **Country** + - **Model name** + - **GitHub repo** + - **Documentation** + - **Date created** +* - United States + - `OG-USA` + - https://github.com/PSLmodels/OG-USA + - https://pslmodels.github.io/OG-USA + - May 25, 2014 +* - United Kingdom + - `OG-UK` + - https://github.com/PSLmodels/OG-UK + - https://pslmodels.github.io/OG-UK + - Feb. 14, 2021 +* - India + - `OG-IND` + - https://github.com/Revenue-Academy/OG-IND + - https://revenue-academy.github.io/OG-IND + - Jul. 17, 2022 +* - Malaysia + - `OG-MYS` + - https://github.com/Revenue-Academy/OG-MYS + - + - Jul. 17, 2022 +* - South Africa + - `OG-ZAF` + - https://github.com/EAPD-DRB/OG-ZAF + - https://eapd-drb.github.io/OG-ZAF + - Oct. 9, 2022 +``` + +In the following section, we detail a list of items to calibrate for a country and what types of data and approaches might be available for those calibrations. Each of the country-specific models listed in {numref}`TabCountryModels` will have varying degrees of calibration maturity and further varying degrees of documentation of their calibration. But the following section details all the areas where each of these models should be calibrated. + + +(SecCalibList)= +## Detail of parameters, data, and approaches for calibration + +{numref}`TabCalibStrategy` shows the data and calibration strategies for each parameter and parameter area of the model. 
+ +```{list-table} **Areas, parameters, and data strategies for calibrating country- or region-specific OG model based on OG-Core.** +:header-rows: 1 +:name: TabCalibStrategy +* - **General item description** + - **Specific item description** + - **Data source** +* - Demographics + - Using UN population data + - Access to country demographics in UN Population Data Portal +* - Demographics + - Other data source + - Custom interface between OG model and other data source. Data source must have the number of people by age, fertility rates by age, mortality rates by age (age bins are suitable and interpolation can be used). +* - Macroeconomic parameters + - Capital share of income, private/sovereign interest rate spread, long-run growth rate, debt-to-GDP ratios, transfer spending to GDP, government spending on goods and services to GDP, foreign purchases of government debt + - Capital and Labor cost data by industry. Average private borrowing rate/corporate bond yields, GDP time series, publicly held government debt time series, government transfer program spending, government spending (total non-transfer and infrastructure spending separately) +* - Lifetime income profiles + - Approximate US profiles rescaled by Gini coefficient + - Gini coefficient for the country +* - Lifetime income profiles + - Estimate from micro data + - Individual panel data with earnings (wage, salaries, self-employment income before taxes), labor hours, and age (can impute labor hours if necessary) +* - Labor supply elasticities + - Constant + - Use existing estimates from the research literature. Or cross sectional or panel data with hours and wages. +* - Labor supply elasticities + - Age varying + - Use existing estimates from the research literature. Or cross sectional or panel data with hours and wages and age. +* - Bequest motive + - Bequest motive + - Data on bequests given and/or bequests received similar to the US Survey of Consumer Finances. 
Other forms of information could allow us to rescale the US bequest distribution to match some moment from the target country. +* - Rate of time preference + - Constant + - Research empirical literature +* - Rate of time preference + - Heterogeneous (match to MPCs and wealth distribution) + - Data on country marginal propensity to consume (e.g., US Consumer Expenditure Survey or PSID) and data on the distribution of wealth in the country +* - Composite consumption share parameters + - Stone-Geary sub-utility function + - Consumption by category data within the country (e.g., similar to the US Consumer Expenditure Survey) +* - Hand-to-mouth consumers + - Calibrated separately from savers + - Cross-sectional or panel data with measures of income, wealth, consumption +* - Link PIT microsimulation model, produces effective tax rates and marginal tax rates by total income (even better if it has both labor income and capital income breakdown) + - PIT model has Python API + - Microsimulation model with Python API +* - Link PIT microsimulation model, produces effective tax rates and marginal tax rates by total income (even better if it has both labor income and capital income breakdown) + - PIT model has command line interface + - Microsimulation model that can be executed from a terminal command line +* - Link PIT microsimulation model, produces effective tax rates and marginal tax rates by total income (even better if it has both labor income and capital income breakdown) + - PIT model has another way to interact with it + - Microsimulation model is in another program like Excel that can be run with an executable or with other software +* - Consumption tax rates + - Single rate + - Average consumption tax rates (e.g., time series with total revenue from consumption taxes and time series on GDP/national income) +* - Consumption tax rates + - Product-specific rates + - Consumption tax rates by product or industry category +* - Public Pension system (exogenous retirement age) + - If 
one of [notional defined contribution, defined benefits, points system, US Social Security] + - Pension rules based on age, payout, retirement rules, spouse benefits +* - Public Pension system (exogenous retirement age) + - If pension system not mentioned above + - Pension rules based on age, payout, retirement rules, spouse benefits +* - Production functions by industry + - More than one industry + - Time series of capital and labor demand by industry, output by industry +* - Calibrate METRs, capital cost recovery by industry with Cost of Capital Calculator + - Gather data on cost recovery policies and business tax system by country + - Tax code treatment of business income, depreciation +* - Calibrate METRs, capital cost recovery by industry with Cost of Capital Calculator + - Gather data on value of different types of assets by industry + - Time series or recent snapshot of investment or asset holdings by asset type, tax treatment (e.g., corporation, partnership), and industry +* - Calibrate METRs, capital cost recovery by industry with Cost of Capital Calculator + - Link [`Cost-of-Capital-Calculator`](https://ccc.pslmodels.org/) to OG macro model + - No additional data requirements +* - Infrastructure + - As share of gov’t spending and as share of firm production + - Current government infrastructure spending data plus time series of capital and labor demand by industry, output by industry +``` + + +(SecCalibFootnotes)= +## Footnotes + + diff --git a/ogcore/SS.py b/ogcore/SS.py index 7b15daf05..cb6d254ea 100644 --- a/ogcore/SS.py +++ b/ogcore/SS.py @@ -80,8 +80,8 @@ def euler_equation_solver(guesses, *args): theta, p.e[:, j], p.rho, - p.etr_params[-1, :, :], - p.mtry_params[-1, :, :], + p.etr_params[-1], + p.mtry_params[-1], None, j, p, @@ -101,8 +101,8 @@ def euler_equation_solver(guesses, *args): theta, p.chi_n, p.e[:, j], - p.etr_params[-1, :, :], - p.mtrx_params[-1, :, :], + p.etr_params[-1], + p.mtrx_params[-1], None, j, p, @@ -139,7 +139,7 @@ def 
euler_equation_solver(guesses, *args): False, "SS", p.e[:, j], - p.etr_params[-1, :, :], + p.etr_params[-1], p, ) cons = household.get_cons( @@ -270,10 +270,14 @@ def inner_loop(outer_loop_vars, p, client): theta = tax.replacement_rate_vals(nssmat, w, factor, None, p) - etr_params_3D = np.tile( - np.reshape(p.etr_params[-1, :, :], (p.S, 1, p.etr_params.shape[2])), - (1, p.J, 1), - ) + num_params = len(p.etr_params[-1][0]) + etr_params_3D = [ + [ + [p.etr_params[-1][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] net_tax = tax.net_taxes( r_p, @@ -310,7 +314,7 @@ def inner_loop(outer_loop_vars, p, client): B = aggr.get_B(bssmat, p, "SS", False) # Find gov't debt - r_gov = fiscal.get_r_gov(r, p) + r_gov = fiscal.get_r_gov(r, p, "SS") D, D_d, D_f, new_borrowing, _, new_borrowing_f = fiscal.get_D_ss( r_gov, Y, p ) @@ -361,7 +365,7 @@ def inner_loop(outer_loop_vars, p, client): new_r = firm.get_r(Y_vec[-1], K_vec[-1], p_m, p, "SS", -1) new_w = firm.get_w(Y_vec[-1], L_vec[-1], p_m, p, "SS") - new_r_gov = fiscal.get_r_gov(new_r, p) + new_r_gov = fiscal.get_r_gov(new_r, p, "SS") # now get accurate measure of debt service cost ( D, @@ -398,10 +402,15 @@ def inner_loop(outer_loop_vars, p, client): new_p_i = np.dot(p.io_matrix, new_p_m) new_p_tilde = aggr.get_ptilde(new_p_i, p.tau_c[-1, :], p.alpha_c) - etr_params_3D = np.tile( - np.reshape(p.etr_params[-1, :, :], (p.S, 1, p.etr_params.shape[2])), - (1, p.J, 1), - ) + num_params = len(p.etr_params[-1][0]) + etr_params_3D = [ + [ + [p.etr_params[-1][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + taxss = tax.net_taxes( new_r_p, new_w, @@ -658,7 +667,7 @@ def SS_solver( K_vec_ss = new_K_vec L_vec_ss = new_L_vec Y_vec_ss = new_Y_vec - r_gov_ss = fiscal.get_r_gov(rss, p) + r_gov_ss = fiscal.get_r_gov(rss, p, "SS") p_m_ss = new_p_m p_i_ss = np.dot(p.io_matrix, p_m_ss) p_tilde_ss = aggr.get_ptilde(p_i_ss, p.tau_c[-1, :], p.alpha_c) @@ -710,18 +719,29 @@ 
def SS_solver( theta = tax.replacement_rate_vals(nssmat, wss, factor_ss, None, p) # Compute effective and marginal tax rates for all agents - etr_params_3D = np.tile( - np.reshape(p.etr_params[-1, :, :], (p.S, 1, p.etr_params.shape[2])), - (1, p.J, 1), - ) - mtrx_params_3D = np.tile( - np.reshape(p.mtrx_params[-1, :, :], (p.S, 1, p.mtrx_params.shape[2])), - (1, p.J, 1), - ) - mtry_params_3D = np.tile( - np.reshape(p.mtry_params[-1, :, :], (p.S, 1, p.mtry_params.shape[2])), - (1, p.J, 1), - ) + num_params = len(p.etr_params[-1][0]) + etr_params_3D = [ + [ + [p.etr_params[-1][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + mtrx_params_3D = [ + [ + [p.mtrx_params[-1][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + mtry_params_3D = [ + [ + [p.mtry_params[-1][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + labor_noncompliance_rate_2D = np.tile( np.reshape(p.labor_income_tax_noncompliance_rate[-1, :], (1, p.J)), (p.S, 1), @@ -1237,10 +1257,17 @@ def run_SS(p, client=None): if p.reform_use_baseline_solution: # use baseline solution as starting values if dimensions match try: + print( + "Shape HH = ", + ss_solutions["bssmat_splus1"].shape, + p.S, + p.J, + ) + print("Shape firm = ", ss_solutions["Y_vec_ss"].shape, p.M) if ss_solutions["bssmat_splus1"].shape == ( p.S, p.J, - ) and ss_solutions["Y_vec_ss"].shape == (p.M): + ) and np.squeeze(ss_solutions["Y_vec_ss"].shape) == (p.M): print("Using previous solutions for SS") ( b_guess, @@ -1269,10 +1296,15 @@ def run_SS(p, client=None): ) use_new_guesses = False else: + print( + "Dimensions of previous solutions for SS do not match" + ) use_new_guesses = True except KeyError: + print("KeyError: previous solutions for SS not found") use_new_guesses = True else: + print("Using new guesses for SS") use_new_guesses = True if use_new_guesses: if p.use_zeta: diff --git a/ogcore/TPI.py b/ogcore/TPI.py index 
edb7e7793..15d7b8bcb 100644 --- a/ogcore/TPI.py +++ b/ogcore/TPI.py @@ -156,8 +156,8 @@ def firstdoughnutring( theta[j], p.e[-1, j], p.rho[-1], - p.etr_params[0, -1, :], - p.mtry_params[0, -1, :], + p.etr_params[0][-1], + p.mtry_params[0][-1], None, j, p, @@ -178,8 +178,8 @@ def firstdoughnutring( theta[j], p.chi_n[-1], p.e[-1, j], - p.etr_params[0, -1, :], - p.mtrx_params[0, -1, :], + p.etr_params[0][-1], + p.mtrx_params[0][-1], None, j, p, @@ -231,12 +231,12 @@ def twist_doughnut( j (int): index of ability type s (int): years of life remaining t (int): model period - etr_params (Numpy array): ETR function parameters, - size = sxsxnum_params - mtrx_params (Numpy array): labor income MTR function parameters, - size = sxsxnum_params - mtry_params (Numpy array): capital income MTR function - parameters, size = sxsxnum_params + etr_params (list): ETR function parameters, + list of lists with size = sxsxnum_params + mtrx_params (list): labor income MTR function parameters, + list of lists with size = sxsxnum_params + mtry_params (list): capital income MTR function + parameters, lists of lists with size = sxsxnum_params initial_b (Numpy array): savings of agents alive at T=0, size = SxJ p (OG-Core Specifications object): model parameters @@ -408,22 +408,29 @@ def inner_loop(guesses, outer_loop_vars, initial_values, ubi, j, ind, p): tr_to_use = np.diag(tr[: p.S, :, j], p.S - (s + 2)) ubi_to_use = np.diag(ubi[: p.S, :, j], p.S - (s + 2)) - length_diag = np.diag(p.etr_params[: p.S, :, 0], p.S - (s + 2)).shape[ - 0 + num_params = len(p.etr_params[0][0]) + temp_etr = [ + [p.etr_params[t][p.S - s - 2 + t][i] for i in range(num_params)] + for t in range(s + 2) ] - etr_params_to_use = np.zeros((length_diag, p.etr_params.shape[2])) - mtrx_params_to_use = np.zeros((length_diag, p.mtrx_params.shape[2])) - mtry_params_to_use = np.zeros((length_diag, p.mtry_params.shape[2])) - for i in range(p.etr_params.shape[2]): - etr_params_to_use[:, i] = np.diag( - p.etr_params[: p.S, :, i], 
p.S - (s + 2) - ) - mtrx_params_to_use[:, i] = np.diag( - p.mtrx_params[: p.S, :, i], p.S - (s + 2) - ) - mtry_params_to_use[:, i] = np.diag( - p.mtry_params[: p.S, :, i], p.S - (s + 2) - ) + etr_params_to_use = [ + [temp_etr[i][j] for j in range(num_params)] for i in range(s + 2) + ] + temp_mtrx = [ + [p.mtrx_params[t][p.S - s - 2 + t][i] for i in range(num_params)] + for t in range(s + 2) + ] + mtrx_params_to_use = [ + [temp_mtrx[i][j] for j in range(num_params)] for i in range(s + 2) + ] + temp_mtry = [ + [p.mtry_params[t][p.S - s - 2 + t][i] for i in range(num_params)] + for t in range(s + 2) + ] + mtry_params_to_use = [ + [temp_mtry[i][j] for j in range(num_params)] for i in range(s + 2) + ] + solutions = opt.root( twist_doughnut, list(b_guesses_to_use) + list(n_guesses_to_use), @@ -463,19 +470,19 @@ def inner_loop(guesses, outer_loop_vars, initial_values, ubi, j, ind, p): ubi_to_use = np.diag(ubi[t : t + p.S, :, j]) # initialize array of diagonal elements - length_diag = np.diag(p.etr_params[t : t + p.S, :, 0]).shape[0] - etr_params_to_use = np.zeros((length_diag, p.etr_params.shape[2])) - mtrx_params_to_use = np.zeros((length_diag, p.mtrx_params.shape[2])) - mtry_params_to_use = np.zeros((length_diag, p.mtry_params.shape[2])) - - for i in range(p.etr_params.shape[2]): - etr_params_to_use[:, i] = np.diag(p.etr_params[t : t + p.S, :, i]) - mtrx_params_to_use[:, i] = np.diag( - p.mtrx_params[t : t + p.S, :, i] - ) - mtry_params_to_use[:, i] = np.diag( - p.mtry_params[t : t + p.S, :, i] - ) + num_params = len(p.etr_params[t][0]) + etr_params_to_use = [ + [p.etr_params[t + s][s][i] for i in range(num_params)] + for s in range(p.S) + ] + mtrx_params_to_use = [ + [p.mtrx_params[t + s][s][i] for i in range(num_params)] + for s in range(p.S) + ] + mtry_params_to_use = [ + [p.mtry_params[t + s][s][i] for i in range(num_params)] + for s in range(p.S) + ] solutions = opt.root( twist_doughnut, @@ -662,7 +669,7 @@ def run_TPI(p, client=None): total_tax_revenue = 
np.ones(p.T + p.S) * ss_vars["total_tax_revenue"] # Compute other interest rates - r_gov = fiscal.get_r_gov(r, p) + r_gov = fiscal.get_r_gov(r, p, "TPI") r_p = np.ones_like(r) * ss_vars["r_p_ss"] MPKg = np.zeros((p.T, p.M)) for m in range(p.M): @@ -707,7 +714,6 @@ def run_TPI(p, client=None): # TPI loop while (TPIiter < p.maxiter) and (TPIdist >= p.mindist_TPI): - outer_loop_vars = (r_p, r, w, p_m, BQ, TR, theta) # compute composite good price p_i = ( @@ -754,12 +760,18 @@ def run_TPI(p, client=None): bmat_splus1 = np.zeros((p.T, p.S, p.J)) bmat_splus1[:, :, :] = b_mat[: p.T, :, :] - etr_params_4D = np.tile( - p.etr_params[: p.T, :, :].reshape( - p.T, p.S, 1, p.etr_params.shape[2] - ), - (1, 1, p.J, 1), - ) + num_params = len(p.etr_params[0][0]) + etr_params_4D = [ + [ + [ + [p.etr_params[t][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + for t in range(p.T) + ] + bqmat = household.get_bq(BQ, None, p, "TPI") trmat = household.get_tr(TR, None, p, "TPI") tax_mat = tax.net_taxes( @@ -936,7 +948,7 @@ def run_TPI(p, client=None): ) # For case where economy is small open econ rnew[p.zeta_K == 1] = p.world_int_rate[p.zeta_K == 1] - r_gov_new = fiscal.get_r_gov(rnew, p) + r_gov_new = fiscal.get_r_gov(rnew, p, "TPI") MPKg_vec = np.zeros((p.T, p.M)) for m in range(p.M): MPKg_vec[:, m] = np.squeeze( @@ -1088,18 +1100,37 @@ def run_TPI(p, client=None): print("\tDistance:", TPIdist) # Compute effective and marginal tax rates for all agents - mtrx_params_4D = np.tile( - p.mtrx_params[: p.T, :, :].reshape( - p.T, p.S, 1, p.mtrx_params.shape[2] - ), - (1, 1, p.J, 1), - ) - mtry_params_4D = np.tile( - p.mtry_params[: p.T, :, :].reshape( - p.T, p.S, 1, p.mtry_params.shape[2] - ), - (1, 1, p.J, 1), - ) + num_params = len(p.mtrx_params[0][0]) + etr_params_4D = [ + [ + [ + [p.etr_params[t][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + for t in range(p.T) + ] + mtrx_params_4D = [ + [ + [ + 
[p.mtrx_params[t][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + for t in range(p.T) + ] + mtry_params_4D = [ + [ + [ + [p.mtry_params[t][s][i] for i in range(num_params)] + for j in range(p.J) + ] + for s in range(p.S) + ] + for t in range(p.T) + ] labor_noncompliance_rate_3D = np.tile( np.reshape( p.labor_income_tax_noncompliance_rate[: p.T, :], (p.T, 1, p.J) diff --git a/ogcore/__init__.py b/ogcore/__init__.py index c7a0d5187..6b1322859 100644 --- a/ogcore/__init__.py +++ b/ogcore/__init__.py @@ -19,4 +19,4 @@ from ogcore.txfunc import * from ogcore.utils import * -__version__ = "0.10.2" +__version__ = "0.10.7" diff --git a/ogcore/aggregates.py b/ogcore/aggregates.py index 81a44c6cb..20299b7e6 100644 --- a/ogcore/aggregates.py +++ b/ogcore/aggregates.py @@ -309,7 +309,7 @@ def revenue( ubi (array_like): universal basic income household distributions theta (Numpy array): social security replacement rate for each lifetime income group - etr_params (Numpy array): parameters of the effective tax rate + etr_params (list): list of parameters of the effective tax rate functions p (OG-Core Specifications object): model parameters method (str): adjusts calculation dimensions based on 'SS' or diff --git a/ogcore/constants.py b/ogcore/constants.py index 68c8eb79b..533051873 100644 --- a/ogcore/constants.py +++ b/ogcore/constants.py @@ -16,7 +16,7 @@ "B": "Wealth ($B_t$)", "I_total": "Investment ($I_t$)", "K": "Capital Stock ($K_t$)", - "Y_vec": "GDP ($Y_t$)", + "Y_vec": "Output ($Y_t$)", "C_vec": "Consumption ($C_t$)", "L_vec": "Labor ($L_t$)", "K_vec": "Capital Stock ($K_t$)", diff --git a/ogcore/default_parameters.json b/ogcore/default_parameters.json index e8f5ed580..5a764bd52 100644 --- a/ogcore/default_parameters.json +++ b/ogcore/default_parameters.json @@ -3,7 +3,8 @@ "labels": {}, "additional_members": { "section_1": {"type": "str"}, - "section_2": {"type": "str"} + "section_2": {"type": "str"}, + "array_first": {"type": 
"bool"} } }, "frisch": { @@ -829,9 +830,10 @@ "section_1": "Fiscal Policy Parameters", "notes": "", "type": "float", + "number_dims": 1, "value": [ { - "value": 1.0 + "value": [1.0] } ], "validators": { @@ -847,9 +849,10 @@ "description": "Parameter to shift the market interest rate to find interest rate on government debt.", "notes": "", "type": "float", + "number_dims": 1, "value": [ { - "value": 0.02 + "value": [0.02] } ], "validators": { @@ -3583,6 +3586,7 @@ "notes": "", "type": "float", "number_dims": 3, + "array_first": false, "value": [ { "value": [ @@ -3604,6 +3608,7 @@ "notes": "", "type": "float", "number_dims": 3, + "array_first": false, "value": [ { "value": [ @@ -3625,6 +3630,7 @@ "notes": "", "type": "float", "number_dims": 3, + "array_first": false, "value": [ { "value": [ diff --git a/ogcore/fiscal.py b/ogcore/fiscal.py index 43948b44f..8483dcf87 100644 --- a/ogcore/fiscal.py +++ b/ogcore/fiscal.py @@ -350,7 +350,7 @@ def get_TR( return new_TR -def get_r_gov(r, p): +def get_r_gov(r, p, method): r""" Determine the interest rate on government debt @@ -367,7 +367,10 @@ def get_r_gov(r, p): time path or in the steady-state """ - r_gov = np.maximum(p.r_gov_scale * r - p.r_gov_shift, 0.00) + if method == "SS": + r_gov = np.maximum(p.r_gov_scale[-1] * r - p.r_gov_shift[-1], 0.00) + else: + r_gov = np.maximum(p.r_gov_scale * r - p.r_gov_shift, 0.00) return r_gov diff --git a/ogcore/household.py b/ogcore/household.py index ef2b45f4d..3cc1ac67e 100644 --- a/ogcore/household.py +++ b/ogcore/household.py @@ -361,9 +361,9 @@ def FOC_savings( lifetime income group e (Numpy array): effective labor units rho (Numpy array): mortality rates - etr_params (Numpy array): parameters of the effective tax rate + etr_params (list): parameters of the effective tax rate functions - mtry_params (Numpy array): parameters of the marginal tax rate + mtry_params (list): parameters of the marginal tax rate on capital income functions t (int): model period j (int): index of ability 
type @@ -530,9 +530,9 @@ def FOC_labor( chi_n (Numpy array): utility weight on the disutility of labor supply e (Numpy array): effective labor units - etr_params (Numpy array): parameters of the effective tax rate + etr_params (list): parameters of the effective tax rate functions - mtrx_params (Numpy array): parameters of the marginal tax rate + mtrx_params (list): parameters of the marginal tax rate on labor income functions t (int): model period j (int): index of ability type diff --git a/ogcore/output_plots.py b/ogcore/output_plots.py index c275c5e52..bf7af7fc5 100644 --- a/ogcore/output_plots.py +++ b/ogcore/output_plots.py @@ -151,7 +151,7 @@ def plot_aggregates( plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() @@ -163,6 +163,7 @@ def plot_industry_aggregates( reform_tpi=None, reform_params=None, var_list=["Y_vec"], + ind_names_list=None, plot_type="pct_diff", num_years_to_plot=50, start_year=DEFAULT_START_YEAR, @@ -207,6 +208,11 @@ def plot_industry_aggregates( """ assert isinstance(start_year, (int, np.integer)) assert isinstance(num_years_to_plot, int) + dims = base_tpi[var_list[0]].shape[1] + if ind_names_list: + assert len(ind_names_list) == dims + else: + ind_names_list = [str(i) for i in range(dims)] # Make sure both runs cover same time period if reform_tpi: assert base_params.start_year == reform_params.start_year @@ -217,7 +223,11 @@ def plot_industry_aggregates( assert reform_tpi is not None fig1, ax1 = plt.subplots() for i, v in enumerate(var_list): - for m in range(base_params.M): + if len(var_list) == 1: + var_label = "" + else: + var_label = VAR_LABELS[v] + for m in range(dims): if plot_type == "pct_diff": plot_var = ( reform_tpi[v][:, m] - base_tpi[v][:, m] @@ -226,7 +236,7 @@ def plot_industry_aggregates( plt.plot( year_vec, plot_var[start_index : start_index + 
num_years_to_plot], - label=VAR_LABELS[v] + "for industry " + str(m), + label=var_label + " " + ind_names_list[m], ) elif plot_type == "diff": plot_var = reform_tpi[v][:, m] - base_tpi[v][:, m] @@ -234,7 +244,7 @@ def plot_industry_aggregates( plt.plot( year_vec, plot_var[start_index : start_index + num_years_to_plot], - label=VAR_LABELS[v] + "for industry " + str(m), + label=var_label + " " + ind_names_list[m], ) elif plot_type == "levels": plt.plot( @@ -242,10 +252,7 @@ def plot_industry_aggregates( base_tpi[v][ start_index : start_index + num_years_to_plot, m ], - label="Baseline " - + VAR_LABELS[v] - + "for industry " - + str(m), + label="Baseline " + var_label + " " + ind_names_list[m], ) if reform_tpi: plt.plot( @@ -253,10 +260,7 @@ def plot_industry_aggregates( reform_tpi[v][ start_index : start_index + num_years_to_plot, m ], - label="Reform " - + VAR_LABELS[v] - + "for industry " - + str(m), + label="Reform " + var_label + " " + ind_names_list[m], ) ylabel = r"Model Units" elif plot_type == "forecast": @@ -267,10 +271,7 @@ def plot_industry_aggregates( plt.plot( year_vec, plot_var_base, - label="Baseline " - + VAR_LABELS[v] - + "for industry " - + str(m), + label="Baseline " + var_label + " " + ind_names_list[m], ) # Plot change from baseline forecast pct_change = ( @@ -287,7 +288,7 @@ def plot_industry_aggregates( plt.plot( year_vec, plot_var_reform, - label="Reform " + VAR_LABELS[v] + "for industry " + str(m), + label="Reform " + var_label + " " + ind_names_list[m], ) # making units labels will not work if multiple variables # and they are in different units @@ -316,7 +317,7 @@ def plot_industry_aggregates( plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() @@ -380,7 +381,7 @@ def ss_3Dplot( if plot_title: plt.title(plot_title) if path: - plt.savefig(path) + plt.savefig(path, 
dpi=300) else: return plt @@ -502,7 +503,7 @@ def plot_gdp_ratio( plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() @@ -576,7 +577,7 @@ def ability_bar( plt.title(plot_title, fontsize=15) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig plt.close() @@ -630,7 +631,7 @@ def ability_bar_ss( plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig plt.close() @@ -717,7 +718,7 @@ def tpi_profiles( plt.title(plot_title, fontsize=15) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() @@ -797,7 +798,7 @@ def ss_profiles( plt.title(plot_title, fontsize=15) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() @@ -1166,7 +1167,7 @@ def inequality_plot( plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2) if path: fig_path1 = os.path.join(path) - plt.savefig(fig_path1, bbox_inches="tight") + plt.savefig(fig_path1, bbox_inches="tight", dpi=300) else: return fig1 plt.close() diff --git a/ogcore/parameter_plots.py b/ogcore/parameter_plots.py index 86e61d7e6..a2b054bf0 100644 --- a/ogcore/parameter_plots.py +++ b/ogcore/parameter_plots.py @@ -37,7 +37,7 @@ def plot_imm_rates(p, year=DEFAULT_START_YEAR, include_title=False, path=None): return fig else: fig_path = os.path.join(path, "imm_rates_orig") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_mort_rates(p, include_title=False, path=None): @@ 
-66,7 +66,7 @@ def plot_mort_rates(p, include_title=False, path=None): return fig else: fig_path = os.path.join(path, "mortality_rates") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_pop_growth( @@ -106,7 +106,7 @@ def plot_pop_growth( return fig else: fig_path = os.path.join(path, "pop_growth_rates") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_population(p, years_to_plot=["SS"], include_title=False, path=None): @@ -145,7 +145,7 @@ def plot_population(p, years_to_plot=["SS"], include_title=False, path=None): return fig else: fig_path = os.path.join(path, "pop_distribution") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_ability_profiles(p, include_title=False, path=None): @@ -233,7 +233,7 @@ def plot_elliptical_u(p, plot_MU=True, include_title=False, path=None): return fig else: fig_path = os.path.join(path, "ellipse_v_CFE") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_chi_n(p, include_title=False, path=None): @@ -259,7 +259,7 @@ def plot_chi_n(p, include_title=False, path=None): return fig else: fig_path = os.path.join(path, "chi_n_values") - plt.savefig(fig_path) + plt.savefig(fig_path, dpi=300) def plot_fert_rates( @@ -332,7 +332,7 @@ def plot_fert_rates( # Save or return figure if output_dir: output_path = os.path.join(output_dir, "fert_rates") - plt.savefig(output_path) + plt.savefig(output_path, dpi=300) plt.close() else: return fig @@ -394,7 +394,7 @@ def plot_mort_rates_data( np.hstack([infmort_rate, mort_rates_all[min_yr - 1 : max_yr]]), ) plt.axvline(x=max_yr, color="red", linestyle="-", linewidth=1) - plt.grid(b=True, which="major", color="0.65", linestyle="-") + plt.grid(visible=True, which="major", color="0.65", linestyle="-") # plt.title('Fitted mortality rate function by age ($rho_{s}$)', # fontsize=20) plt.xlabel(r"Age $s$") @@ -411,7 +411,7 @@ def plot_mort_rates_data( # Save or return figure if output_dir: output_path = os.path.join(output_dir, "mort_rates") - 
plt.savefig(output_path) + plt.savefig(output_path, dpi=300) plt.close() else: return fig @@ -455,7 +455,7 @@ def plot_omega_fixed( # Save or return figure if output_dir: output_path = os.path.join(output_dir, "OrigVsFixSSpop") - plt.savefig(output_path) + plt.savefig(output_path, dpi=300) plt.close() else: return fig @@ -496,7 +496,7 @@ def plot_imm_fixed( # Save or return figure if output_dir: output_path = os.path.join(output_dir, "OrigVsAdjImm") - plt.savefig(output_path) + plt.savefig(output_path, dpi=300) plt.close() else: return fig @@ -562,7 +562,7 @@ def plot_population_path( # Save or return figure if output_dir: output_path = os.path.join(output_dir, "PopDistPath") - plt.savefig(output_path) + plt.savefig(output_path, dpi=300) plt.close() else: return fig @@ -853,7 +853,7 @@ def txfunc_sse_plot(age_vec, sse_mat, start_year, varstr, output_dir, round): plt.ylabel(r"SSE") graphname = "SSE_" + varstr + "_Round" + str(round) output_path = os.path.join(output_dir, graphname) - plt.savefig(output_path, bbox_inches="tight") + plt.savefig(output_path, bbox_inches="tight", dpi=300) plt.close() @@ -889,7 +889,7 @@ def plot_income_data( plt.plot(ages, emat) filename = "ability_2D_lev" + filesuffix fullpath = os.path.join(output_dir, filename) - plt.savefig(fullpath) + plt.savefig(fullpath, dpi=300) plt.close() # Plot of 2D, J=1 in logs @@ -897,7 +897,7 @@ def plot_income_data( plt.plot(ages, np.log(emat)) filename = "ability_2D_log" + filesuffix fullpath = os.path.join(output_dir, filename) - plt.savefig(fullpath) + plt.savefig(fullpath, dpi=300) plt.close() else: # Plot of 3D, J>1 in levels @@ -910,7 +910,7 @@ def plot_income_data( ax10.set_zlabel(r"ability $e_{j,s}$") filename = "ability_3D_lev" + filesuffix fullpath = os.path.join(output_dir, filename) - plt.savefig(fullpath) + plt.savefig(fullpath, dpi=300) plt.close() # Plot of 3D, J>1 in logs @@ -928,7 +928,7 @@ def plot_income_data( ax11.set_zlabel(r"log ability $log(e_{j,s})$") filename = "ability_3D_log" + 
filesuffix fullpath = os.path.join(output_dir, filename) - plt.savefig(fullpath) + plt.savefig(fullpath, dpi=300) plt.close() if J <= 10: # Restricted because of line and marker types @@ -976,7 +976,7 @@ def plot_income_data( ax.set_ylabel(r"log ability $log(e_{j,s})$") filename = "ability_2D_log" + filesuffix fullpath = os.path.join(output_dir, filename) - plt.savefig(fullpath) + plt.savefig(fullpath, dpi=300) plt.close() else: if J <= 10: # Restricted because of line and marker types @@ -1086,7 +1086,7 @@ def plot_2D_taxfunc( if len(tax_func_type) < len(tax_param_list): tax_func_type = [tax_func_type[0]] * len(tax_param_list) for i, v in enumerate(tax_func_type): - assert v in ["DEP", "DEP_totalinc", "GS", "linear"] + assert v in ["DEP", "DEP_totalinc", "GS", "linear", "mono"] assert rate_type in ["etr", "mtrx", "mtry"] assert len(tax_param_list) == len(labels) @@ -1118,13 +1118,8 @@ def plot_2D_taxfunc( # get tax rates for each point in the income support and plot fig, ax = plt.subplots() for i, tax_params in enumerate(tax_param_list): - if tax_func_type[i] == "mono": - tax_params = tax_params[rate_key][s][t][0] - else: - tax_param_array = np.array(tax_params[rate_key]) - tax_params = tax_param_array[s, t, :] + tax_params = tax_params[rate_key][s][t] rates = txfunc.get_tax_rates( - # tax_params[rate_key][s, t, :], tax_params, X, Y, @@ -1189,4 +1184,4 @@ def wm(x): if path is None: return fig else: - plt.savefig(path) + plt.savefig(path, dpi=300) diff --git a/ogcore/parameters.py b/ogcore/parameters.py index 3354d336e..5a4da6596 100644 --- a/ogcore/parameters.py +++ b/ogcore/parameters.py @@ -91,7 +91,7 @@ def compute_default_params(self): ) # determine length of budget window from individual income tax # parameters passed in - self.BW = self.etr_params.shape[0] + self.BW = len(self.etr_params) # Find number of economically active periods of life self.E = int( self.starting_age @@ -143,6 +143,8 @@ def compute_default_params(self): "replacement_rate_adjust", 
"zeta_D", "zeta_K", + "r_gov_scale", + "r_gov_shift", ] for item in tp_param_list: this_attr = getattr(self, item) @@ -353,35 +355,45 @@ def compute_default_params(self): ] for item in tax_params_to_TP: tax_to_set = getattr(self, item) - if tax_to_set.size == 1: + try: + tax_to_set = ( + tax_to_set.tolist() + ) # in case parameters are numpy arrays + except AttributeError: # catches if they are lists already + pass + if len(tax_to_set) == 1 and isinstance(tax_to_set[0], float): setattr( self, item, - np.ones((self.T + self.S, self.S, self.J)) * tax_to_set, + [ + [[tax_to_set] for i in range(self.S)] + for t in range(self.T) + ], ) - elif tax_to_set.ndim == 3: - if tax_to_set.shape[0] > self.T + self.S: - tax_to_set = tax_to_set[: self.T + self.S, :, :] - if tax_to_set.shape[0] < self.T + self.S: - tax_to_set = np.append( - tax_to_set[:, :, :], - np.tile( - tax_to_set[-1, :, :], - (self.T + self.S - tax_to_set.shape[0], 1, 1), - ), - axis=0, + elif any( + [ + isinstance(tax_to_set[i][j], list) + for i, v in enumerate(tax_to_set) + for j, vv in enumerate(tax_to_set[i]) + ] + ): + if len(tax_to_set) > self.T + self.S: + tax_to_set = tax_to_set[: self.T + self.S] + if len(tax_to_set) < self.T + self.S: + tax_params_to_add = [tax_to_set[-1]] * ( + self.T + self.S - len(tax_to_set) ) - if tax_to_set.shape[1] > self.S: - tax_to_set = tax_to_set[:, : self.S, :] - if item == "tau_c": - if tax_to_set.shape[2] > self.J: - tax_to_set = tax_to_set[:, :, : self.J] + tax_to_set.extend(tax_params_to_add) + if len(tax_to_set[0]) > self.S: + for t, v in enumerate(tax_to_set): + tax_to_set[t] = tax_to_set[t][: self.S] setattr(self, item, tax_to_set) else: print( "please give a " + item - + " that is a single element or 3-D array" + + " that is a single element or nested lists of" + + " lists that is three lists deep" ) assert False @@ -619,7 +631,31 @@ def update_specifications(self, revision, raise_errors=True): """ if not (isinstance(revision, dict) or isinstance(revision, str)): 
raise ValueError("ERROR: revision is not a dictionary or string") + # Skip over the adjust method if the tax paraemeters passed in + # are fucntions (e.g., in the case of tax_func_type = mono) + tax_update_dict = {} + tax_func_params_functions = False + try: + if revision["tax_func_type"] in ["mono", "mono2D"]: + tax_func_params_functions = True + except (KeyError, TypeError): + pass + if self.tax_func_type in ["mono", "mono2D"]: + tax_func_params_functions = True + if tax_func_params_functions: + try: + for item in ["etr_params", "mtrx_params", "mtry_params"]: + if item in revision.keys(): + tax_update_dict[item] = revision[item] + del revision[item] + except (KeyError, TypeError): + pass self.adjust(revision, raise_errors=raise_errors) + # put tax values skipped over in the adjust method back in so + # they are in the parameters class. + if tax_update_dict != {}: + for key, value in tax_update_dict.items(): + setattr(self, key, value) self.compute_default_params() diff --git a/ogcore/tax.py b/ogcore/tax.py index 35e748905..1f0c0ed08 100644 --- a/ogcore/tax.py +++ b/ogcore/tax.py @@ -143,7 +143,7 @@ def ETR_income( factor (scalar): scaling factor converting model units to dollars e (Numpy array): effective labor units - etr_params (array_like or function): effective tax rate function + etr_params (list): list of effective tax rate function parameters or nonparametric function labor_noncompliance_rate (Numpy array): income tax noncompliance rate for labor income capital_noncompliance_rate (Numpy array): income tax noncompliance rate for capital income @@ -192,9 +192,9 @@ def MTR_income( mtr_capital (bool): whether to compute the marginal tax rate on capital income or labor income e (Numpy array): effective labor units - etr_params (array_like or function): effective tax rate function + etr_params (list): list of effective tax rate function parameters or nonparametric function - mtr_params (array_like or function): marginal tax rate function + mtr_params (list): 
list of marginal tax rate function parameters or nonparametric function noncompliance_rate (Numpy array): income tax noncompliance rate p (OG-Core Specifications object): model parameters @@ -329,7 +329,7 @@ def net_taxes( method (str): adjusts calculation dimensions based on 'SS' or 'TPI' e (Numpy array): effective labor units - etr_params (Numpy array): effective tax rate function parameters + etr_params (list): list of effective tax rate function parameters p (OG-Core Specifications object): model parameters Returns: @@ -362,7 +362,7 @@ def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p): method (str): adjusts calculation dimensions based on 'SS' or 'TPI' e (Numpy array): effective labor units - etr_params (Numpy array): effective tax rate function parameters + etr_params (list): effective tax rate function parameters p (OG-Core Specifications object): model parameters Returns: diff --git a/ogcore/txfunc.py b/ogcore/txfunc.py index 78804935a..4234d9a61 100644 --- a/ogcore/txfunc.py +++ b/ogcore/txfunc.py @@ -62,7 +62,7 @@ def get_tax_rates( functions. 
Args: - params (array_like or function): parameters of the tax function, or + params (list): list of parameters of the tax function, or nonparametric function for tax function type "mono" X (array_like): labor income data Y (array_like): capital income data @@ -83,6 +83,10 @@ def get_tax_rates( X2 = X**2 Y2 = Y**2 income = X + Y + if tax_func_type != "mono": + params = np.array( + params + ) # easier to use arrays for calculations below, except when can't (bc lists of functions) if tax_func_type == "GS": phi0, phi1, phi2 = ( np.squeeze(params[..., 0]), @@ -252,11 +256,60 @@ def get_tax_rates( rate = np.squeeze(params[..., 0]) txrates = rate * np.ones_like(income) elif tax_func_type == "mono": - mono_interp = params[0] - txrates = mono_interp(income) + if for_estimation: + mono_interp = params[0] + txrates = mono_interp(income) + else: + if np.isscalar(income): + txrates = params[0](income) + elif income.ndim == 1: + # for s in range(income.shape[0]): + # txrates[s] = params[s][0](income[s]) + if (income.shape[0] == len(params)) and ( + len(params) > 1 + ): # for case where loops over S + txrates = [ + params[s][0](income[s]) for s in range(income.shape[0]) + ] + else: + txrates = [ + params[0](income[i]) for i in range(income.shape[0]) + ] + elif ( + income.ndim == 2 + ): # I think only calls here are for loops over S and J + # for s in range(income.shape[0]): + # for j in range(income.shape[1]): + # txrates[s, j] = params[s][j][0](income[s, j]) + txrates = [ + [ + params[s][j][0](income[s, j]) + for j in range(income.shape[1]) + ] + for s in range(income.shape[0]) + ] + else: # to catch 3D arrays, looping over T, S, J + # for t in range(income.shape[0]): + # for s in range(income.shape[1]): + # for j in range(income.shape[2]): + # txrates[t, s, j] = params[t][s][j][0]( + # income[t, s, j] + # ) + txrates = [ + [ + [ + params[t][s][j][0](income[t, s, j]) + for j in range(income.shape[2]) + ] + for s in range(income.shape[1]) + ] + for t in range(income.shape[0]) + 
] + txrates = np.array(txrates) elif tax_func_type == "mono2D": mono_interp = params[0] txrates = mono_interp([[X, Y]]) + return txrates @@ -692,7 +745,7 @@ def txfunc_est( income, txrates, wgts, bins=bin_num ) wsse = wsse_cstr - params = mono_interp + params = [mono_interp] params_to_plot = params elif tax_func_type == "mono2D": obs = df.shape[0] @@ -1648,11 +1701,10 @@ def tax_func_estimate( ) if tax_func_path: - try: - with open(tax_func_path, "wb") as f: + with open(tax_func_path, "wb") as f: + try: pickle.dump(dict_params, f) - except AttributeError: - with open(tax_func_path, "wb") as f: + except AttributeError: cloudpickle.dump(dict_params, f) return dict_params @@ -1733,6 +1785,7 @@ def monotone_spline( plot_start/plot_end (number between 0, 100): for 'pygam' only if show_plot = True, start and end for percentile of data used in plot, can result in better visualizations if original data has strong outliers + Returns: xNew (numpy array): 2d with second dimension m, first @@ -1744,6 +1797,7 @@ def monotone_spline( weightsNew (numpy array): 1d with length same as yNew, weight corresponding to each xNew, yNew row """ + if method == "pygam": if len(x.shape) == 1: @@ -1883,14 +1937,10 @@ def interp(x): # create binned and weighted x and y data if bins: if not np.isscalar(bins): - err_msg = ( - "monotone_spline2 ERROR: bins value is not type scalar" - ) + err_msg = "monotone_spline2 ERROR: bins value is not type scalar" raise ValueError(err_msg) - N = bins - x_binned, y_binned, weights_binned = utils.avg_by_bin( - x, y, weights, bins - ) + N = int(bins) + x_binned, y_binned, weights_binned = utils.avg_by_bin(x, y, weights, N) elif not bins: N = len(x) diff --git a/ogcore/utils.py b/ogcore/utils.py index 7be02a64e..85fe21147 100644 --- a/ogcore/utils.py +++ b/ogcore/utils.py @@ -158,7 +158,6 @@ def comp_array(name, a, b, tol, unequal, exceptions={}, relative=False): return False else: - if np.all(a < EPSILON) and np.all(b < EPSILON): return True diff --git 
a/setup.py b/setup.py index 361176a3d..9e5e104c7 100755 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setuptools.setup( name="ogcore", - version="0.10.2", + version="0.10.7", author="Jason DeBacker and Richard W. Evans", license="CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", description="A general equilibribum overlapping generations model for fiscal policy analysis", diff --git a/tests/test_TPI.py b/tests/test_TPI.py index 19adba0a9..2fdfee269 100644 --- a/tests/test_TPI.py +++ b/tests/test_TPI.py @@ -568,6 +568,33 @@ def test_run_TPI(baseline, param_updates, filename, tmpdir, dask_client): filename8 = os.path.join( CUR_PATH, "test_io_data", "run_TPI_outputs_baseline_Kg_nonzero_2.pkl" ) +# read in mono tax funcs (not age specific) +dict_params = utils.safe_read_pickle( + os.path.join(CUR_PATH, "test_io_data", "TxFuncEst_mono_nonage.pkl") +) +p = Specifications() +etr_params = [[None] * p.S] * p.T +mtrx_params = [[None] * p.S] * p.T +mtry_params = [[None] * p.S] * p.T +for s in range(p.S): + for t in range(p.T): + if t < p.BW: + etr_params[t][s] = dict_params["tfunc_etr_params_S"][t][s] + mtrx_params[t][s] = dict_params["tfunc_mtrx_params_S"][t][s] + mtry_params[t][s] = dict_params["tfunc_mtry_params_S"][t][s] + else: + etr_params[t][s] = dict_params["tfunc_etr_params_S"][-1][s] + mtrx_params[t][s] = dict_params["tfunc_mtrx_params_S"][-1][s] + mtry_params[t][s] = dict_params["tfunc_mtry_params_S"][-1][s] +param_updates9 = { + "tax_func_type": "mono", + "etr_params": etr_params, + "mtrx_params": mtrx_params, + "mtry_params": mtry_params, +} +filename9 = os.path.join( + CUR_PATH, "test_io_data", "run_TPI_outputs_mono_2.pkl" +) @pytest.mark.local @@ -581,6 +608,7 @@ def test_run_TPI(baseline, param_updates, filename, tmpdir, dask_client): (True, {}, filename1), (False, param_updates4, filename4), (True, param_updates8, filename8), + (True, param_updates9, filename9), ], ids=[ "Baseline, balanced budget", @@ -590,6 +618,7 @@ def test_run_TPI(baseline, 
param_updates, filename, tmpdir, dask_client): "Baseline", "Reform, baseline spending", "Baseline, Kg>0", + "mono tax functions", ], ) def test_run_TPI_extra(baseline, param_updates, filename, tmpdir, dask_client): diff --git a/tests/test_basic.py b/tests/test_basic.py index b419d4b57..e3a18227b 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -66,9 +66,8 @@ def test_constant_demographics_TPI(tmpdir, dask_client): og_spec = { "constant_demographics": True, "budget_balance": True, - "zero_taxes": True, "maxiter": 2, - "r_gov_shift": 0.0, + "r_gov_shift": [0.0], "zeta_D": [0.0, 0.0], "zeta_K": [0.0, 0.0], "debt_ratio_ss": 1.0, @@ -84,14 +83,14 @@ def test_constant_demographics_TPI(tmpdir, dask_client): } spec.update_specifications(og_spec) spec.etr_params = np.zeros( - (spec.T + spec.S, spec.S, spec.etr_params.shape[2]) - ) + (spec.T + spec.S, spec.S, len(spec.etr_params[0][0])) + ).tolist() spec.mtrx_params = np.zeros( - (spec.T + spec.S, spec.S, spec.mtrx_params.shape[2]) - ) + (spec.T + spec.S, spec.S, len(spec.mtrx_params[0][0])) + ).tolist() spec.mtry_params = np.zeros( - (spec.T + spec.S, spec.S, spec.mtry_params.shape[2]) - ) + (spec.T + spec.S, spec.S, len(spec.mtry_params[0][0])) + ).tolist() # Run SS ss_outputs = SS.run_SS(spec, client=dask_client) # save SS results @@ -123,9 +122,8 @@ def test_constant_demographics_TPI_small_open(tmpdir, dask_client): og_spec = { "constant_demographics": True, "budget_balance": True, - "zero_taxes": True, "maxiter": 2, - "r_gov_shift": 0.0, + "r_gov_shift": [0.0], "zeta_D": [0.0, 0.0], "zeta_K": [1.0], "debt_ratio_ss": 1.0, @@ -141,14 +139,14 @@ def test_constant_demographics_TPI_small_open(tmpdir, dask_client): } spec.update_specifications(og_spec) spec.etr_params = np.zeros( - (spec.T + spec.S, spec.S, spec.etr_params.shape[2]) - ) + (spec.T + spec.S, spec.S, len(spec.etr_params[0][0])) + ).tolist() spec.mtrx_params = np.zeros( - (spec.T + spec.S, spec.S, spec.mtrx_params.shape[2]) - ) + (spec.T + spec.S, 
spec.S, len(spec.mtrx_params[0][0])) + ).tolist() spec.mtry_params = np.zeros( - (spec.T + spec.S, spec.S, spec.mtry_params.shape[2]) - ) + (spec.T + spec.S, spec.S, len(spec.mtry_params[0][0])) + ).tolist() # Run SS ss_outputs = SS.run_SS(spec, client=dask_client) # save SS results diff --git a/tests/test_fiscal.py b/tests/test_fiscal.py index e1c4c43f6..8e203250e 100644 --- a/tests/test_fiscal.py +++ b/tests/test_fiscal.py @@ -244,27 +244,35 @@ def test_get_TR( p1 = Specifications() -p1.r_gov_scale = 0.5 -p1.r_gov_shift = 0.0 +p1.r_gov_scale = [0.5] +p1.r_gov_shift = [0.0] p2 = Specifications() -p2.r_gov_scale = 0.5 -p2.r_gov_shift = 0.01 +p2.r_gov_scale = [0.5] +p2.r_gov_shift = [0.01] p3 = Specifications() -p3.r_gov_scale = 0.5 -p3.r_gov_shift = 0.03 +p3.r_gov_scale = [0.5] +p3.r_gov_shift = [0.03] r = 0.04 r_gov1 = 0.02 r_gov2 = 0.01 r_gov3 = 0.0 +r_tpi = np.ones(320) * r +r_gov_tpi = np.ones(320) * r_gov3 +p4 = p3.update_specifications({"r_gov_scale": [0.5], "r_gov_shift": [0.03]}) @pytest.mark.parametrize( - "r,p,r_gov_expected", - [(r, p1, r_gov1), (r, p2, r_gov2), (r, p3, r_gov3)], - ids=["Scale only", "Scale and shift", "r_gov < 0"], + "r,p,method,r_gov_expected", + [ + (r, p1, "SS", r_gov1), + (r, p2, "SS", r_gov2), + (r, p3, "SS", r_gov3), + (r, p3, "TPI", r_gov3), + ], + ids=["Scale only", "Scale and shift", "r_gov < 0", "TPI"], ) -def test_get_r_gov(r, p, r_gov_expected): - r_gov = fiscal.get_r_gov(r, p) +def test_get_r_gov(r, p, method, r_gov_expected): + r_gov = fiscal.get_r_gov(r, p, method) assert np.allclose(r_gov, r_gov_expected) diff --git a/tests/test_household.py b/tests/test_household.py index f237b79c9..4256c038d 100644 --- a/tests/test_household.py +++ b/tests/test_household.py @@ -994,7 +994,6 @@ def test_get_y(): "bssmat,nssmat,cssmat,ltilde", test_data, ids=["passing", "failing"] ) def test_constraint_checker_SS(bssmat, nssmat, cssmat, ltilde): - household.constraint_checker_SS(bssmat, nssmat, cssmat, ltilde) assert True @@ -1003,7 
+1002,6 @@ def test_constraint_checker_SS(bssmat, nssmat, cssmat, ltilde): "bssmat,nssmat,cssmat,ltilde", test_data, ids=["passing", "failing"] ) def test_constraint_checker_TPI(bssmat, nssmat, cssmat, ltilde): - household.constraint_checker_TPI(bssmat, nssmat, cssmat, 10, ltilde) assert True diff --git a/tests/test_io_data/TxFuncEst_mono_nonage.pkl b/tests/test_io_data/TxFuncEst_mono_nonage.pkl new file mode 100644 index 000000000..321115143 Binary files /dev/null and b/tests/test_io_data/TxFuncEst_mono_nonage.pkl differ diff --git a/tests/test_io_data/run_TPI_outputs_mono_2.pkl b/tests/test_io_data/run_TPI_outputs_mono_2.pkl new file mode 100644 index 000000000..8867a493f Binary files /dev/null and b/tests/test_io_data/run_TPI_outputs_mono_2.pkl differ diff --git a/tests/test_parameter_plots.py b/tests/test_parameter_plots.py index 243a24aa5..26a3c9d70 100644 --- a/tests/test_parameter_plots.py +++ b/tests/test_parameter_plots.py @@ -22,6 +22,9 @@ GS_nonage_spec_taxfunctions = utils.safe_read_pickle( os.path.join(CUR_PATH, "test_io_data", "TxFuncEst_GS_nonage.pkl") ) +mono_nonage_spec_taxfunctions = utils.safe_read_pickle( + os.path.join(CUR_PATH, "test_io_data", "TxFuncEst_mono_nonage.pkl") +) micro_data = utils.safe_read_pickle( os.path.join(CUR_PATH, "test_io_data", "micro_data_dict_for_tests.pkl") ) @@ -384,6 +387,7 @@ def test_plot_income_data_save_fig(tmpdir): (base_taxfunctions, 43, "DEP", "etr", True, [micro_data], None), (base_taxfunctions, 43, "DEP", "mtry", True, [micro_data], None), (base_taxfunctions, 43, "DEP", "mtrx", True, [micro_data], None), + (mono_nonage_spec_taxfunctions, None, "mono", "etr", True, None, None), ], ids=[ "over_labinc=True", @@ -392,6 +396,7 @@ def test_plot_income_data_save_fig(tmpdir): "with data", "MTR capital income", "MTR labor income", + "Mono functions", ], ) def test_plot_2D_taxfunc( diff --git a/tests/test_utils.py b/tests/test_utils.py index 4bbf61f0d..354385c5b 100644 --- a/tests/test_utils.py +++ 
b/tests/test_utils.py @@ -702,7 +702,6 @@ def test_not_connected_true(): "df,output_type,precision", test_data, ids=["tex", "json", "html"] ) def test_save_return_table(df, output_type, precision): - test_str = utils.save_return_table(df, output_type, None, precision) assert isinstance(test_str, str)