diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..d2f626c
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,22 @@
+os:
+  - linux
+  - osx
+dist: trusty
+sudo: false
+before_install:
+- if [ "$TRAVIS_OS_NAME" = "linux" ]; then wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; fi
+- export PATH="$HOME/miniconda/bin:$PATH"
+- bash miniconda.sh -b -p $HOME/miniconda
+- conda config --set always_yes yes --set changeps1 no
+- conda update -y -q conda
+script:
+- conda install -c uvcdat/label/nightly -c conda-forge -c uvcdat genutil nose image-compare flake8 matplotlib
+- export UVCDAT_ANONYMOUS_LOG=False
+- echo $TRAVIS_BRANCH
+- export TRAVIS_PR_BRANCH=$TRAVIS_BRANCH
+- echo $TRAVIS_EVENT_TYPE
+- echo $TRAVIS_PULL_REQUEST
+- python setup.py install
+- python run_tests.py -v2 -n2
+after_success:
+  - if [ "$TRAVIS_BRANCH" == "master" -a "$TRAVIS_PULL_REQUEST" == "false" ]; then conda install conda-build && conda install anaconda-client && bash ci-support/conda_upload.sh ; fi
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7f5c363
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+BSD 3-clause license
+Copyright (c) 2015-2017, conda-forge
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/conda/meta.yaml b/conda/meta.yaml
new file mode 100644
index 0000000..e2d6e54
--- /dev/null
+++ b/conda/meta.yaml
@@ -0,0 +1,43 @@
+{% set name = "ensometrics" %}
+{% set version = "0.1" %}
+
+package:
+  name: {{ name|lower }}
+  version: {{ version }}
+
+source:
+  git_url: https://github.com/eguil/Enso_metrics.git
+  git_rev: master
+
+
+build:
+  number: 0
+  skip: True  # [win or py3k]
+  script: python setup.py install
+
+requirements:
+  build:
+    - python
+    - setuptools
+  run:
+    - python
+    - cdms2
+    - numpy
+    - udunits2
+    - genutil
+
+test:
+  command:
+    - export UVCDAT_ANONYMOUS_LOG=false && python run_tests.py -v2
+
+about:
+  home: http://github.com/eguil/Enso_metrics
+  license: 'CCLRC'
+  license_file: LICENSE
+  summary: "Library to compute ENSO metrics"
+
+extra:
+  recipe-maintainers:
+    - doutriaux1
+    - eguil
+    - lee1043
diff --git a/lib/EnsoMetricsGraph.py b/lib/EnsoMetricsGraph.py
index 1cd3a7f..6b897b9 100644
--- a/lib/EnsoMetricsGraph.py
+++ b/lib/EnsoMetricsGraph.py
@@ -5,9 +5,9 @@
 import matplotlib.pyplot as plt


-def EnsoMetricsTable(models,metrics, figName):
+def EnsoMetricsTable(models, metrics, figName):

-    fig=plt.figure()
+    fig = plt.figure()
     ax = fig.add_subplot(111)
     ax.xaxis.set_visible(False)
     ax.yaxis.set_visible(False)
@@ -15,29 +15,30 @@ def EnsoMetricsTable(models, metrics, figName):
     colLabels = ("ENSO Amplitude", r'$\mu$')
     rowLabels = models
     # make the table
-    the_table = ax.table(cellText=metrics,colWidths=[.2,.2],
-        colLabels=colLabels,rowLabels=rowLabels,colLoc='center',loc='center')
+    the_table = ax.table(cellText=metrics, colWidths=[.2, .2],
+                         colLabels=colLabels, rowLabels=rowLabels, colLoc='center', loc='center')
     # height of rows
     table_props = the_table.properties()
     table_cells = table_props['child_artists']
-    for cell in table_cells: cell.set_height(0.08)
+    for cell in table_cells:
+        cell.set_height(0.08)
     # size of fonts
     the_table.set_fontsize(12)
-    #the_table.scale(1.5, 1.5)
-    plt.tight_layout(rect=[0.05,0.15,0.95,.95])
+    # the_table.scale(1.5, 1.5)
+    plt.tight_layout(rect=[0.05, 0.15, 0.95, .95])
     plt.show()
-    #plt.savefig(figName+'.jpeg')
+    # plt.savefig(figName+'.jpeg')

-    #colLabels=("Model", "ENSO Amplitude", "Mu")
-    #nrows, ncols = len(clust_data)+1, len(colLables)
-    #hcell, wcell = 0.3, 1.
-    #hpad, wpad = 0, 0
-    #fig=plt.figure(figsize=(ncols*wcell+wpad, nrows*hcell+hpad))
-    #ax = fig.add_subplot(111)
-    #ax.axis('off')
-    ##do the table
-    #the_table = ax.table(cellText=clust_data,
+    # colLabels=("Model", "ENSO Amplitude", "Mu")
+    # nrows, ncols = len(clust_data)+1, len(colLables)
+    # hcell, wcell = 0.3, 1.
+    # hpad, wpad = 0, 0
+    # fig=plt.figure(figsize=(ncols*wcell+wpad, nrows*hcell+hpad))
+    # ax = fig.add_subplot(111)
+    # ax.axis('off')
+    # do the table
+    # the_table = ax.table(cellText=clust_data,
     #                      colLabels=colLabels,
     #                      loc='center')
-    #plt.savefig("table.png")
\ No newline at end of file
+    # plt.savefig("table.png")
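A minimal usage sketch of EnsoMetricsTable() as modified above; the model names and metric values are made up for illustration and are not part of the repository, and the import assumes the caller sits next to lib/EnsoMetricsGraph.py:

# Hypothetical call to EnsoMetricsTable; all labels and numbers are placeholders.
from EnsoMetricsGraph import EnsoMetricsTable

models = ['ModelA', 'ModelB']            # row labels (placeholders)
metrics = [['0.81', '9.2'],              # ENSO amplitude (C), mu (10-3 N/m2/C) for ModelA
           ['1.05', '11.7']]             # same two columns for ModelB (made-up values)

# Renders a two-column table (ENSO Amplitude, mu) with one row per model and shows it;
# figName is currently unused because the savefig call is commented out.
EnsoMetricsTable(models, metrics, 'enso_table')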
diff --git a/lib/EnsoMetricsLib.py b/lib/EnsoMetricsLib.py
index 32e89b1..c12a107 100644
--- a/lib/EnsoMetricsLib.py
+++ b/lib/EnsoMetricsLib.py
@@ -1,21 +1,22 @@
-import os
 import cdms2 as cdm
 import numpy as npy
 import cdutil as cdu
-from genutil import statistics
-from cdms2.selectors import Selector
-from monthly_variability_statistics import interannual_variabilty_std_annual_cycle_removed, get_slope_linear_regression_from_anomaly
+from monthly_variability_statistics import interannual_variabilty_std_annual_cycle_removed
+from monthly_variability_statistics import get_slope_linear_regression_from_anomaly
 import MV2 as mv
 #
 # Libray to compute ENSO metrics
 # These procedures have file names as inputs and metric as output
 #
-def Ensoampln3 (sstfile, sstname):
-    ensoampl = Ensoampl(sstfile,sstname, nino3)
+
+
+def Ensoampln3(sstfile, sstname):
+    ensoampl = Ensoampl(sstfile, sstname, nino3)  # noqa ??? We need to pass nino3
     return ensoampl

-def EnsoAmpl (sstfile, sstname, ninobox):
+
+def EnsoAmpl(sstfile, sstname, ninobox):
     '''
     The EnsoAmpl() function compute the SST standard deviation in a ninobox
@@ -48,21 +49,21 @@ def EnsoAmpl (sstfile, sstname, ninobox):
     cdm.setAutoBounds('on')

     # Define metric attributes
-    Name   = 'ENSO amplitude'
-    Units  = 'C'
-    Method = 'Standard deviation of SST in '+ninobox
-    Ref    = 'Using CDAT std dev calculation'
+    Name = 'ENSO amplitude'
+    Units = 'C'
+    Method = 'Standard deviation of SST in ' + ninobox
+    Ref = 'Using CDAT std dev calculation'

     # Open file and get time dimension
     fi = cdm.open(sstfile)
-    ssth = fi[sstname] # Create variable handle
+    ssth = fi[sstname]  # Create variable handle
     # Number of months and years
     timN = ssth.shape[0]
     yearN = timN / 12

     # define ninobox
-    if ninobox =='nino3':
-        nbox = cdu.region.domain(latitude=(-5.,5.),longitude=(-150,-90))
+    if ninobox == 'nino3':
+        nbox = cdu.region.domain(latitude=(-5., 5.), longitude=(-150, -90))
     else:
         print '!!! ninobox not defined in EnsoAmpl', ninobox

     # Read SST in box
@@ -73,11 +74,12 @@ def EnsoAmpl (sstfile, sstname, ninobox):
     sstStd = interannual_variabilty_std_annual_cycle_removed(sst)

     # Create output
-    amplMetric = {'name':Name, 'value':sstStd, 'units':Units, 'method':Method, 'nyears':yearN, 'ref':Ref}
+    amplMetric = {'name': Name, 'value': sstStd, 'units': Units, 'method': Method, 'nyears': yearN, 'ref': Ref}

     return amplMetric

-def EnsoMu (sstfile, tauxfile, sstname, tauxname):
+
+def EnsoMu(sstfile, tauxfile, sstname, tauxname):
     '''
     The EnsoMu() function compute the regression of nino4 tauxA over nino3 sstA
@@ -110,39 +112,39 @@ def EnsoMu (sstfile, tauxfile, sstname, tauxname):
     cdm.setAutoBounds('on')

     # Define metric attributes
-    Name   = 'Bjerknes feedback (mu)'
-    Units  = '10-3 N/m2/C'
+    Name = 'Bjerknes feedback (mu)'
+    Units = '10-3 N/m2/C'
     Method = 'Regression of nino4 tauxA over nino3 sstA'
-    Ref    = 'Using CDAT regression calculation'
+    Ref = 'Using CDAT regression calculation'

     # Open file and get time dimension
     fsst = cdm.open(sstfile)
     ftaux = cdm.open(tauxfile)
-    ssth = fsst[sstname] # Create variable handle
+    ssth = fsst[sstname]  # Create variable handle
     # Number of months and years
     timN = ssth.shape[0]
     yearN = timN / 12

     # Read SST and Taux in boxes
-    n3box = cdu.region.domain(latitude=(-5.,5.),longitude=(-150,-90))
+    n3box = cdu.region.domain(latitude=(-5., 5.), longitude=(-150, -90))
     sst = fsst(sstname, n3box)
-    n4box = cdu.region.domain(latitude=(-5.,5.),longitude=(160,210))
+    n4box = cdu.region.domain(latitude=(-5., 5.), longitude=(160, 210))
     taux = ftaux(tauxname, n4box)

     # Average and compute regression of interannual anomaly
-    muSlope = get_slope_linear_regression_from_anomaly(taux,sst, 0) # (all points)
-    muSlopePlus = get_slope_linear_regression_from_anomaly(taux,sst, 1) # (positive SSTA = El Nino)
-    muSlopeNeg = get_slope_linear_regression_from_anomaly(taux,sst, -1) # (negative SSTA = La Nina)
+    muSlope = get_slope_linear_regression_from_anomaly(taux, sst, 0)  # (all points)
+    muSlopePlus = get_slope_linear_regression_from_anomaly(taux, sst, 1)  # (positive SSTA = El Nino)
+    muSlopeNeg = get_slope_linear_regression_from_anomaly(taux, sst, -1)  # (negative SSTA = La Nina)

     # Change units
-    muSlope     = muSlope * 1000.
+    muSlope = muSlope * 1000.
     muSlopePlus = muSlopePlus * 1000.
-    muSlopeNeg  = muSlopeNeg * 1000.
+    muSlopeNeg = muSlopeNeg * 1000.

     # Create output
-    muMetric = {'name':Name, 'value':muSlope, 'units':Units, 'method':Method, 'nyears':yearN, 'ref':Ref, \
-                'nonlinearity':muSlopeNeg-muSlopePlus}
+    muMetric = {'name': Name, 'value': muSlope, 'units': Units, 'method': Method, 'nyears': yearN, 'ref': Ref,
+                'nonlinearity': muSlopeNeg - muSlopePlus}

     return muMetric

@@ -153,9 +155,9 @@ def computeAnom(var1d, nYears):
     :param var:
     :return:
     '''
-    varAC = npy.ma.ones([12], dtype='float32')*0.
+    varAC = npy.ma.ones([12], dtype='float32') * 0.
     for m in range(12):
-        d = var1d[m::12] # select indices for month m every 12 months
-        varAC[m] = mv.average(d) # average along time axis
-    varInter = var1d - npy.tile(varAC, nYears) # compute anomaly
+        d = var1d[m::12]  # select indices for month m every 12 months
+        varAC[m] = mv.average(d)  # average along time axis
+    varInter = var1d - npy.tile(varAC, nYears)  # compute anomaly
     return varInter
diff --git a/lib/__init_.py b/lib/__init_.py
new file mode 100644
index 0000000..95275ca
--- /dev/null
+++ b/lib/__init_.py
@@ -0,0 +1,3 @@
+from EnsoMetricsLib import Ensoampln3, EnsoAmpl, EnsoMu, computeAnom  # noqa
+from metricsCollectionsLib import defCollection, metricReqs  # noqa
+from EnsoMetricsGraph import EnsoMetricsTable  # noqa
diff --git a/lib/metricsCollectionsLib.py b/lib/metricsCollectionsLib.py
index c346b76..e3ce11c 100644
--- a/lib/metricsCollectionsLib.py
+++ b/lib/metricsCollectionsLib.py
@@ -4,11 +4,13 @@
 # Draft version
 #
 # Define metrics collections
+
+
 def defCollection(MC=True):
-# Name, list of metrics
+    # Name, list of metrics
     metrics_collection = {
-        'MC1':{'long_name':'Metrics collection Q1','list_of_metrics':['EnsoAmpl','EnsoMu'],
-               'description':'Describe which science question this collection is about'},
+        'MC1': {'long_name': 'Metrics collection Q1', 'list_of_metrics': ['EnsoAmpl', 'EnsoMu'],
+                'description': 'Describe which science question this collection is about'},
     }
     if MC:
         return metrics_collection
@@ -16,13 +18,14 @@
         return metrics_collection[MC]

 # List of metrics requirements (var name and reference obs)
+
+
 def metricReqs(VOR=True):
     var_obs_requirements = {
-        'EnsoAmpl':{'nbvar':1,'var_names':['sst'],'ref_obs':['HadiSST1.1']},
-        'EnsoMu':{'nbvar':2,'var_names':['sst','taux'],'ref_obs':['HadiSST1.1','ERA-interim']},
+        'EnsoAmpl': {'nbvar': 1, 'var_names': ['sst'], 'ref_obs': ['HadiSST1.1']},
+        'EnsoMu': {'nbvar': 2, 'var_names': ['sst', 'taux'], 'ref_obs': ['HadiSST1.1', 'ERA-interim']},
     }
     if VOR:
         return var_obs_requirements
     else:
         return var_obs_requirements[VOR]
-
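A sketch of how a driver could tie the pieces above together; defCollection()/metricReqs() and EnsoAmpl()/EnsoMu() are the functions from this diff, while the input files and variable names below are placeholders:

# Hypothetical driver over the 'MC1' collection; file paths and variable names are placeholders.
from EnsoMetricsLib import EnsoAmpl, EnsoMu
from metricsCollectionsLib import defCollection, metricReqs

sstfile, tauxfile = 'model_ts.nc', 'model_tauu.nc'   # placeholder input files
sstname, tauxname = 'ts', 'tauu'                     # placeholder variable names

collection = defCollection()['MC1']
requirements = metricReqs()
for metric in collection['list_of_metrics']:
    print metric, 'needs', requirements[metric]['var_names'], 'obs:', requirements[metric]['ref_obs']

ampl = EnsoAmpl(sstfile, sstname, 'nino3')           # dict with 'value', 'units', 'nyears', ...
mu = EnsoMu(sstfile, tauxfile, sstname, tauxname)    # same keys plus 'nonlinearity'
print ampl['value'], ampl['units']
print mu['value'], mu['units'], mu['nonlinearity']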
diff --git a/lib/monthly_variability_statistics.py b/lib/monthly_variability_statistics.py
index 19c382d..348e843 100644
--- a/lib/monthly_variability_statistics.py
+++ b/lib/monthly_variability_statistics.py
@@ -1,64 +1,71 @@
 def interannual_variabilty_std_annual_cycle_removed(d):
-    import cdutil, genutil
-    d_area_avg = cdutil.averager(d,axis='xy')
-    d_area_avg_anom = cdutil.ANNUALCYCLE.departures(d_area_avg)
-    d_area_avg_anom_sd = genutil.statistics.std(d_area_avg_anom)
-    return(float(d_area_avg_anom_sd))
+    import cdutil
+    import genutil
+    d_area_avg = cdutil.averager(d, axis='xy')
+    d_area_avg_anom = cdutil.ANNUALCYCLE.departures(d_area_avg)
+    d_area_avg_anom_sd = genutil.statistics.std(d_area_avg_anom)
+    return(float(d_area_avg_anom_sd))

-def interannual_variability_seasonal_std_mean_removed(d,season_string):
-    import cdutil, genutil
-    d_area_avg = cdutil.averager(d,axis='xy')
-    pre_defined_seasons = ['DJF', 'MAM', 'JJA', 'SON', 'YEAR']
-    if season_string in pre_defined_seasons:
-        d_area_avg_anom=getattr(cdutil,season_string).departures(d_area_avg)
-    else:
-        CustomSeason = cdutil.times.Seasons(season_string)
-        d_area_avg_anom = CustomSeason.departures(d_area_avg)
-    d_area_avg_anom_sd = genutil.statistics.std(d_area_avg_anom)
-    return(float(d_area_avg_anom_sd))
-def get_slope_linear_regression(y,x):
-    import cdutil, genutil
-    results = genutil.statistics.linearregression(y,x=x)
-    slope, intercept = results
-    return(float(slope))
+
+def interannual_variability_seasonal_std_mean_removed(d, season_string):
+    import cdutil
+    import genutil
+    d_area_avg = cdutil.averager(d, axis='xy')
+    pre_defined_seasons = ['DJF', 'MAM', 'JJA', 'SON', 'YEAR']
+    if season_string in pre_defined_seasons:
+        d_area_avg_anom = getattr(cdutil, season_string).departures(d_area_avg)
+    else:
+        CustomSeason = cdutil.times.Seasons(season_string)
+        d_area_avg_anom = CustomSeason.departures(d_area_avg)
+    d_area_avg_anom_sd = genutil.statistics.std(d_area_avg_anom)
+    return(float(d_area_avg_anom_sd))
+
+
+def get_slope_linear_regression(y, x):
+    import genutil
+    results = genutil.statistics.linearregression(y, x=x)
+    slope, intercept = results
+    return(float(slope))
+
+
+def get_slope_linear_regression_from_anomaly(y, x, sign_x):
+    import cdutil
+    import genutil
+    import numpy
+    y_area_avg = cdutil.averager(y, axis='xy')
+    x_area_avg = cdutil.averager(x, axis='xy')
+    x_area_avg_anom = cdutil.ANNUALCYCLE.departures(x_area_avg)
+    y_area_avg_anom = cdutil.ANNUALCYCLE.departures(y_area_avg)
+    if sign_x == 0:
+        results = genutil.statistics.linearregression(y_area_avg_anom, x=x_area_avg_anom)
+    elif sign_x == 1:
+        y_area_avg_anom = numpy.array(y_area_avg_anom)
+        x_area_avg_anom = numpy.array(x_area_avg_anom)
+        idxplus = numpy.nonzero(x_area_avg_anom >= 0.)
+        results = genutil.statistics.linearregression(y_area_avg_anom[idxplus], x=x_area_avg_anom[idxplus])
+        # results=genutil.statistics.linearregression(y_area_avg_anom[idxplus[0].transpose],x=x_area_avg_anom[idxplus])
+    elif sign_x == -1:
+        y_area_avg_anom = numpy.array(y_area_avg_anom)
+        x_area_avg_anom = numpy.array(x_area_avg_anom)
+        idxneg = numpy.nonzero(x_area_avg_anom <= 0.)
+        results = genutil.statistics.linearregression(y_area_avg_anom[idxneg], x=x_area_avg_anom[idxneg])
+        # results = genutil.statistics.linearregression(y_area_avg_anom[idxneg[0].transpose],x=x_area_avg_anom[idxneg])
+    slope, intercept = results
+    return(float(slope))

-def get_slope_linear_regression_from_anomaly(y,x,sign_x):
-    import cdutil, genutil, numpy
-    y_area_avg = cdutil.averager(y,axis='xy')
-    x_area_avg = cdutil.averager(x,axis='xy')
-    x_area_avg_anom = cdutil.ANNUALCYCLE.departures(x_area_avg)
-    y_area_avg_anom = cdutil.ANNUALCYCLE.departures(y_area_avg)
-    if sign_x == 0:
-        results = genutil.statistics.linearregression(y_area_avg_anom,x=x_area_avg_anom)
-    elif sign_x == 1:
-        y_area_avg_anom = numpy.array(y_area_avg_anom)
-        x_area_avg_anom = numpy.array(x_area_avg_anom)
-        idxplus = numpy.nonzero (x_area_avg_anom >= 0.)
-        results = genutil.statistics.linearregression(y_area_avg_anom[idxplus],x=x_area_avg_anom[idxplus])
-        # results = genutil.statistics.linearregression(y_area_avg_anom[idxplus[0].transpose],x=x_area_avg_anom[idxplus])
-    elif sign_x == -1:
-        y_area_avg_anom = numpy.array(y_area_avg_anom)
-        x_area_avg_anom = numpy.array(x_area_avg_anom)
-        idxneg = numpy.nonzero (x_area_avg_anom <= 0.)
-        results = genutil.statistics.linearregression(y_area_avg_anom[idxneg],x=x_area_avg_anom[idxneg])
-        # results = genutil.statistics.linearregression(y_area_avg_anom[idxneg[0].transpose],x=x_area_avg_anom[idxneg])
-    slope, intercept = results
-    return(float(slope))

 def get_area_avg_annual_cycle_removed(d):
-    import cdutil
-    d_area_avg = cdutil.averager(d,axis='xy')
-    d_area_avg_anom = cdutil.ANNUALCYCLE.departures(d_area_avg)
-    return(d_area_avg_anom)
+    import cdutil
+    d_area_avg = cdutil.averager(d, axis='xy')
+    d_area_avg_anom = cdutil.ANNUALCYCLE.departures(d_area_avg)
+    return(d_area_avg_anom)
+

-def get_axis_base_dataset(var, reg, path): # to be called from Atm Feedback driver
-    f = cdms.open(path)
-    if debug:
-        reg_timeseries = f(var, regions_specs[reg]['domain'], time = slice(0,60)) # RUN CODE FAST ON 5 YEARS OF DATA
-    else:
-        reg_timeseries = f(var, regions_specs[reg]['domain'])
-    # Get area averaged and annual cycle removed 1-D time series
-    reg_timeseries_area_avg_anom = get_area_avg_annual_cycle_removed(reg_timeseries)
-    return(reg_timeseries_area_avg_anom)
-    f.close()
+def get_axis_base_dataset(var, reg, path):  # to be called from Atm Feedback driver
+    import cdms2
+    f = cdms2.open(path)
+    reg_timeseries = f(var, regions_specs[reg]['domain'])  # noqa ??? This will never work!
+    # Get area averaged and annual cycle removed 1-D time series
+    reg_timeseries_area_avg_anom = get_area_avg_annual_cycle_removed(reg_timeseries)
+    return(reg_timeseries_area_avg_anom)
+    f.close()
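The helpers above do the anomaly and regression work through cdutil/genutil; the NumPy-only sketch below (synthetic data, not part of the repository) illustrates the same two steps: remove the mean annual cycle, then regress the taux anomalies on the SST anomalies using all points, only positive, or only negative SST anomalies, as selected by sign_x.

# NumPy-only illustration of annual-cycle removal and sign-selected regression (synthetic data).
import numpy as np

nyears = 30
t = np.arange(12 * nyears)
sst = 26. + 2. * np.sin(2 * np.pi * t / 12.) + 0.5 * np.random.randn(t.size)   # fake monthly SST
taux = -0.03 * (sst - sst.mean()) + 0.005 * np.random.randn(t.size)            # fake monthly taux


def remove_annual_cycle(x):
    # Subtract each calendar month's climatological mean (the role of cdutil.ANNUALCYCLE.departures).
    clim = np.array([x[m::12].mean() for m in range(12)])
    return x - np.tile(clim, x.size // 12)


def slope(y, x, sign_x=0):
    # sign_x = 0: all points, 1: positive x anomalies only, -1: negative x anomalies only.
    if sign_x == 1:
        idx = np.nonzero(x >= 0.)
    elif sign_x == -1:
        idx = np.nonzero(x <= 0.)
    else:
        idx = slice(None)
    return np.polyfit(x[idx], y[idx], 1)[0]   # first coefficient of the linear fit is the slope


ssta = remove_annual_cycle(sst)
tauxa = remove_annual_cycle(taux)
print slope(tauxa, ssta, 0), slope(tauxa, ssta, 1), slope(tauxa, ssta, -1)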
diff --git a/run_tests.py b/run_tests.py
new file mode 100755
index 0000000..31e266c
--- /dev/null
+++ b/run_tests.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+import glob
+import sys
+import os
+import argparse
+import multiprocessing
+import subprocess
+import image_compare
+import codecs
+import time
+import webbrowser
+import shlex
+import cdat_info
+
+root = os.getcwd()
+cpus = multiprocessing.cpu_count()
+
+parser = argparse.ArgumentParser(description="Run VCS tests",
+                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument("-H", "--html", action="store_true",
+                    help="create and show html result page")
+parser.add_argument("-p", "--package", action="store_true",
+                    help="package test results")
+parser.add_argument(
+    "-c",
+    "--coverage",
+    action="store_true",
+    help="run coverage (not implemented)")
+parser.add_argument(
+    "-v",
+    "--verbosity",
+    default=1,
+    choices=[
+        0,
+        1,
+        2],
+    type=int,
+    help="verbosity output level")
+parser.add_argument(
+    "-n",
+    "--cpus",
+    default=cpus,
+    type=int,
+    help="number of cpus to use")
+parser.add_argument(
+    "-f",
+    "--failed-only",
+    action="store_true",
+    default=False,
+    help="runs only tests that failed last time and are in the list you provide")
+parser.add_argument(
+    "-A","--attributes",
+    default=[],
+    action="append",
+    help="attribute-based runs")
+parser.add_argument("tests", nargs="*", help="tests to run")
+
+args = parser.parse_args()
+
+
+def abspath(path, name, prefix):
+    import shutil
+    full_path = os.path.abspath(os.path.join(os.getcwd(), "..", path))
+    if not os.path.exists(name):
+        os.makedirs(name)
+    new = os.path.join(nm, prefix + "_" + os.path.basename(full_path))
+    try:
+        shutil.copy(full_path, new)
+    except:
+        pass
+    return new
+
+
+def findDiffFiles(log):
+    i = -1
+    file1 = ""
+    file2 = ""
+    diff = ""
+    N = len(log)
+    while log[i].find("Source file") == -1 and i > -N:
+        i -= 1
+    if i > -N:
+        file1 = log[i - 1].split()[-1]
+        for j in range(i, N):
+            if log[j].find("New best!") > -1:
+                if log[j].find("Comparing") > -1:
+                    file2 = log[j].split()[2]
+                else:
+                    k = j - 1
+                    while log[k].find("Comparing") == -1 and k > -N:
+                        k -= 1
+                    try:
+                        file2 = log[k].split()[2]
+                    except:
+                        file2 = log[k].split()[1][:-1]+log[j].split()[0]
+                        print "+++++++++++++++++++++++++",file2
+            if log[j].find("Saving image diff") > -1:
+                diff = log[j].split()[-1]
+                # break
+    return file1, file2, diff
+
+
+def run_command(command, join_stderr=True):
+    if isinstance(command, basestring):
+        command = shlex.split(command)
+    if args.verbosity > 0:
+        print "Executing %s in %s" % (" ".join(command), os.getcwd())
+    if join_stderr:
+        stderr = subprocess.STDOUT
+    else:
+        stderr = subprocess.PIPE
+    P = subprocess.Popen(
+        command,
+        stdout=subprocess.PIPE,
+        stderr=stderr,
+        bufsize=0,
+        cwd=os.getcwd())
+    out = []
+    while P.poll() is None:
+        read = P.stdout.readline().rstrip()
+        out.append(read)
+        if args.verbosity > 1 and len(read) != 0:
+            print read
+    return P, out
+
+
+def run_nose(test_name):
+    opts = []
+    if args.coverage:
+        opts += ["--with-coverage"]
+    for att in args.attributes:
+        opts += ["-A", att]
+    command = ["nosetests", ] + opts + ["-s", test_name]
+    start = time.time()
+    P, out = run_command(command)
+    end = time.time()
+    return {test_name: {"result": P.poll(), "log": out, "times": {
+        "start": start, "end": end}}}
+
+
+sys.path.append(
+    os.path.join(
+        os.path.dirname(
+            os.path.abspath(__file__)),
+        "tests"))
+if len(args.tests) == 0:
+    names = glob.glob("tests/test_*.py")
+else:
+    names = set(args.tests)
+
+if args.failed_only and os.path.exists(os.path.join("tests",".last_failure")):
+    f = open(os.path.join("tests",".last_failure"))
+    failed = set(eval(f.read().strip()))
+    f.close()
+    new_names = []
+    for fnm in failed:
+        if fnm in names:
+            new_names.append(fnm)
+    names = new_names
+
+if args.verbosity > 1:
+    print("Names:", names)
+
+if len(names)==0:
+    print "No tests to run"
+    sys.exit(0)
+
+# Make sure we have sample data
+#cdat_info.download_sample_data_files(os.path.join(sys.prefix,"share","EnsoMetrics","test_data_files.txt"),cdat_info.get_sampledata_path())
+
+p = multiprocessing.Pool(args.cpus)
+try:
+    outs = p.map_async(run_nose, names).get(3600)
+except KeyboardInterrupt:
+    sys.exit(1)
+results = {}
+failed = []
+for d in outs:
+    results.update(d)
+    nm = d.keys()[0]
+    if d[nm]["result"] != 0:
+        failed.append(nm)
+f = open(os.path.join("tests",".last_failure"),"w")
+f.write(repr(failed))
+f.close()
+
+if args.verbosity > 0:
+    print "Ran %i tests, %i failed (%.2f%% success)" %\
+        (len(outs), len(failed), 100. - float(len(failed)) / len(outs) * 100.)
+    if len(failed) > 0:
+        print "Failed tests:"
+        for f in failed:
+            print "\t", f
+if args.html or args.package:
+    if not os.path.exists("tests_html"):
+        os.makedirs("tests_html")
+    os.chdir("tests_html")
+
+    js = image_compare.script_data()
+
+    fi = open("index.html", "w")
+    print>>fi, "<!DOCTYPE html>"
+    print>>fi, """<html><head><title>EnsoMetrics test results</title></head>
+    <body>
+    <table>
+    <thead>
+    <tr><th>Test</th><th>Result</th><th>Start Time</th><th>End Time</th><th>Time</th></tr>
+    </thead>
+    <tfoot>
+    <tr><th>Test</th><th>Result</th><th>Start Time</th><th>End Time</th><th>Time</th></tr>
+    </tfoot>
+    """
+    for test in sorted(results.keys()):
+        result = results[test]
+        nm = test.split("/")[-1][:-3]
+        print>>fi, "<tr><td>%s</td>" % nm,
+        fe = codecs.open("%s.html" % nm, "w", encoding="utf-8")
+        print>>fe, "<html><head><title>%s</title>" % nm
+        if result["result"] == 0:
+            print>>fi, "<td><a href='%s.html'>OK</a></td>" % nm,
+            print>>fe, "</head><body>"
+            print>>fe, "<a href='index.html'>Back To Results List</a>"
+        else:
+            print>>fi, "<td><a href='%s.html'>Fail</a></td>" % nm,
+            print>>fe, "<script type='text/javascript'>%s</script></head><body>" % js
+            print>>fe, "<a href='index.html'>Back To Results List</a>"
+        times = result["times"]
+        print>>fi, "<td>%s</td><td>%s</td><td>%s</td></tr>" % (
+            time.ctime(times["start"]), time.ctime(times["end"]), times["end"] - times["start"])
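run_tests.py above collects tests/test_*.py and runs each file through nosetests, so a test contribution would look roughly like the hypothetical file below; its name, input data and expected values are placeholders, not files that exist in the repository:

# tests/test_enso_ampl.py -- hypothetical test that run_tests.py would discover (placeholder data).
import unittest
from EnsoMetricsLib import EnsoAmpl


class TestEnsoAmpl(unittest.TestCase):

    def test_nino3_amplitude(self):
        metric = EnsoAmpl('sample_sst.nc', 'ts', 'nino3')   # placeholder file and variable name
        self.assertEqual(metric['units'], 'C')              # EnsoAmpl reports degrees C
        self.assertGreater(metric['value'], 0.)             # a standard deviation is positive


if __name__ == '__main__':
    unittest.main()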