Merge branch 'release/3.10' into task/568

commit 44761956e6

@@ -31,7 +31,7 @@ jobs:
     needs: check-syntax-errors
     runs-on: windows-latest
     env:
-      PYTHON_VERSION: 3.8
+      PYTHON_VERSION: 3.9
       PYTHON_ARCH: x64
     steps:
       - uses: actions/checkout@v2
@@ -45,7 +45,7 @@ jobs:
         uses: actions/cache@v2
         with:
           path: ~\AppData\Local\pip\Cache
-          key: windows-py${{ env.PYTHON_VERSION }}-${{ env.PYTHON_ARCH}}-pip-${{ hashFiles('**/requirements*.txt') }}-exe
+          key: windows-py${{ env.PYTHON_VERSION }}-${{ env.PYTHON_ARCH }}-pip-${{ hashFiles('**/requirements*.txt') }}-exe

       - name: Setup conda environment
         uses: conda-incubator/setup-miniconda@v2
@@ -79,6 +79,11 @@ jobs:
       - name: Build userguide, binaries, installer
         shell: bash -l {0}
         run: |
+          # resolves ModuleNotFoundError during pyinstaller build on windows:
+          # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH.
+          # If gdalXXX.dll is in the PATH, then set the USE_PATH_FOR_GDAL_PYTHON=YES environment variable
+          # to feed the PATH into os.add_dll_directory().
+          export USE_PATH_FOR_GDAL_PYTHON=YES
          # This builds the users guide, binaries, and installer
          make windows_installer

@@ -19,10 +19,10 @@ jobs:
           # Fetch complete history for accurate versioning
           fetch-depth: 0

-      - name: Set up python 3.8
+      - name: Set up python 3.9
         uses: actions/setup-python@v1
         with:
-          python-version: 3.8
+          python-version: 3.9

       - name: Set up environment
         run: python -m pip install --upgrade pip setuptools setuptools_scm flake8
@@ -42,14 +42,19 @@ jobs:
       fail-fast: false
       max-parallel: 4
       matrix:
-        python-version: [3.7, 3.8]
+        python-version: [3.7, 3.8, 3.9]
         python-architecture: [x64]
         os: [windows-latest, macos-latest]
         include:
           - python-version: 3.7
             numpy: "numpy=1.15"  # fuzzy assertion in conda is single '='
+            gdal: "gdal==3.1.2"
           - python-version: 3.8
             numpy: "numpy=1.16"
+            gdal: "gdal==3.2.0"
+          - python-version: 3.9
+            numpy: "numpy=1.20"
+            gdal: "gdal==3.3.1"
     steps:
       - uses: actions/checkout@v2
         with:
@@ -86,7 +91,7 @@ jobs:
       run: |
         conda install nomkl  # make sure numpy is w/out MKL
         conda upgrade -y pip setuptools
-        conda install ${{ matrix.numpy }} toml twine
+        conda install ${{ matrix.numpy }} ${{ matrix.gdal }} toml twine
         conda install $(python -c "import toml;print(' '.join(toml.load('pyproject.toml')['build-system']['requires']))")

       - name: Build wheel
@@ -169,7 +174,7 @@ jobs:
     needs: check-syntax-errors
     strategy:
       matrix:
-        python-version: [3.7, 3.8]
+        python-version: [3.7, 3.8, 3.9]
         python-architecture: [x64]
         os: [windows-latest, macos-latest]
     steps:
@@ -268,7 +273,7 @@ jobs:
     with:
       activate-environment: validate-env
       auto-update-conda: true
-      python-version: 3.8
+      python-version: 3.9
       channels: conda-forge

     - name: Install dependencies
@@ -294,7 +299,7 @@ jobs:
     fail-fast: False
     max-parallel: 4
     matrix:
-      python-version: [3.7, 3.8]
+      python-version: [3.7, 3.8, 3.9]
       python-architecture: [x64]
       os: [windows-latest]

HISTORY.rst (37 lines changed)

@@ -22,6 +22,7 @@
 - Scenic Quality
 - SDR
 - Seasonal Water Yield
+- Stormwater
 - Urban Cooling
 - Urban Flood Risk
 - Wave Energy
@@ -33,6 +34,10 @@

 .. :changelog:

+..
 Unreleased Changes
 ------------------
+
+Unreleased Changes (3.10)
+-------------------------
 * General:
@@ -41,8 +46,8 @@ Unreleased Changes (3.10)
   * Major updates to each model's ``ARGS_SPEC`` (and some related validation)
     to facilitate re-use & display in the Workbench and User's Guide.

-Unreleased Changes (3.9.1)
---------------------------
+3.9.1 (2021-09-22)
+------------------
 * General:
   * Added error-handling for when ``pandas`` fails to decode a non-utf8
     encoded CSV.
@@ -79,6 +84,19 @@ Unreleased Changes (3.9.1)
     could have been computed by previous runs.
   * Validation now returns a more helpful message when a spatial input has
     no projection defined.
+  * Updated to pygeoprocessing 2.3.2
+  * Added support for GDAL 3.3.1 and above
+  * Added some logging to ``natcap.invest.utils._log_gdal_errors`` to aid in
+    debugging some hard-to-reproduce GDAL logging errors that occasionally
+    cause InVEST models to crash.  If GDAL calls ``_log_gdal_errors`` with an
+    incorrect set of arguments, this is now logged.
+  * Improved the reliability and consistency of log messages across the
+    various ways that InVEST models can be run.  Running InVEST in
+    ``--headless`` mode, for example, will now have the same logging behavior,
+    including with exceptions, as the UI would produce.
+  * The default log level for the CLI has been lowered from
+    ``logging.CRITICAL`` to ``logging.ERROR``.  This ensures that exceptions
+    should always be written to the correct logging streams.
 * Carbon
   * Fixed a bug where, if rate change and discount rate were set to 0, the
     valuation results were in $/year rather than $, too small by a factor of
@@ -102,6 +120,13 @@ Unreleased Changes (3.9.1)
 * HRA
   * Fixed bugs that allowed zeros in DQ & Weight columns of criteria
     table to raise DivideByZero errors.
+* NDR
+  * Fixed a bug that allowed SDR to be calculated in areas that don't drain
+    to any stream.  Now all outputs that depend on distance to stream
+    (``d_dn``, ``dist_to_channel``, ``ic``, ``ndr_n``, ``ndr_p``,
+    ``sub_ndr_n``, ``sub_ndr_p``, ``n_export``, ``p_export``) are only
+    defined for pixels that drain to a stream.  They have nodata everywhere
+    else.
 * Pollination
   * Updated so that the ``total_pollinator_abundance_[season].tif`` outputs
     are always created.  Before, they weren't created if a farm vector was
@@ -119,6 +144,14 @@ Unreleased Changes (3.9.1)
   * Changed how SDR thresholds its L factor to allow direct thresholding
     rather than based off of upstream area.  Exposed this parameter as
     ``l_max`` in the ``args`` input and in the user interface.
+  * Fixed a bug that allowed SDR to be calculated in areas that don't drain
+    to any stream.  Now all outputs that depend on distance to stream
+    (``d_dn``, ``d_dn_bare``, ``ic``, ``ic_bare``, ``sdr``, ``sdr_bare``,
+    ``e_prime``, ``sed_retention``, ``sed_retention_index``,
+    ``sed_deposition``, ``sed_export``) are only defined for pixels that
+    drain to a stream.  They have nodata everywhere else.
+* Stormwater
+  * Added this new model
 * Urban Flood Risk
   * Fixed a bug where a String ``Type`` column in the infrastructure vector
     would cause the aggregation step of the model to crash, even with the

Makefile (6 lines changed)

@@ -6,7 +6,7 @@ GIT_SAMPLE_DATA_REPO_REV := f8c3ef11d06cee9d6f5a07f48057c05939e83028

 GIT_TEST_DATA_REPO := https://bitbucket.org/natcap/invest-test-data.git
 GIT_TEST_DATA_REPO_PATH := $(DATA_DIR)/invest-test-data
-GIT_TEST_DATA_REPO_REV := 912a0732340d706a17968aeb04dc525b139904f2
+GIT_TEST_DATA_REPO_REV := bc55f553c9d57baac0931f5691598baa1bcf3923

 GIT_UG_REPO := https://github.com/natcap/invest.users-guide
 GIT_UG_REPO_PATH := doc/users-guide
@@ -393,8 +393,8 @@ signcode:

 signcode_windows:
 	$(GSUTIL) cp 'gs://stanford_cert/$(P12_FILE)' '$(BUILD_DIR)/$(P12_FILE)'
-	powershell.exe "& '$(SIGNTOOL)' sign /f '$(BUILD_DIR)\$(P12_FILE)' /p '$(CERT_KEY_PASS)' '$(BIN_TO_SIGN)'"
-	powershell.exe "& '$(SIGNTOOL)' timestamp -t http://timestamp.sectigo.com '$(BIN_TO_SIGN)'"
+	powershell.exe "& '$(SIGNTOOL)' sign /fd SHA256 /f '$(BUILD_DIR)\$(P12_FILE)' /p '$(CERT_KEY_PASS)' '$(BIN_TO_SIGN)'"
+	powershell.exe "& '$(SIGNTOOL)' timestamp /tr http://timestamp.sectigo.com /td SHA256 '$(BIN_TO_SIGN)'"
 	-$(RM) $(BUILD_DIR)/$(P12_FILE)
 	@echo "Installer was signed with signtool"

@@ -19,3 +19,9 @@ if platform.system() == 'Darwin':
     # sys._MEIPASS is the path to where the pyinstaller entrypoint bundle
     # lives.  See the pyinstaller docs for more details.
     os.environ['SPATIALINDEX_C_LIBRARY'] = sys._MEIPASS
+
+if platform.system() == 'Windows':
+    # On Windows, with Python >= 3.8, DLLs are no longer imported from the PATH.
+    # If gdalXXX.dll is in the PATH, then set the USE_PATH_FOR_GDAL_PYTHON=YES environment variable
+    # to feed the PATH into os.add_dll_directory().
+    os.environ['USE_PATH_FOR_GDAL_PYTHON'] = 'YES'
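
For context on why this variable matters: since Python 3.8, extension modules on Windows resolve their DLL dependencies only from directories registered via ``os.add_dll_directory()``, not from ``PATH``.  A minimal sketch of the behavior the GDAL bindings enable when the variable is set (paraphrased from memory, not the actual ``osgeo`` source)::

    import os

    # When USE_PATH_FOR_GDAL_PYTHON=YES, the GDAL bindings feed each
    # existing PATH entry into the Python 3.8+ DLL search machinery so
    # that gdalXXX.dll can be located at import time.
    if os.name == 'nt' and os.getenv('USE_PATH_FOR_GDAL_PYTHON') == 'YES':
        for entry in os.environ['PATH'].split(';'):
            if entry and os.path.isdir(entry):
                os.add_dll_directory(entry)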

@@ -453,6 +453,7 @@ Section "InVEST Tools" Section_InVEST_Tools
   !insertmacro StartMenuLink "${SMPATH}\Recreation" "recreation"
   !insertmacro StartMenuLink "${SMPATH}\Urban Flood Risk Mitigation" "ufrm"
   !insertmacro StartMenuLink "${SMPATH}\Urban Cooling Model" "ucm"
+  !insertmacro StartMenuLink "${SMPATH}\Urban Stormwater Retention Model" "stormwater"
   !insertmacro StartMenuLink "${SMPATH}\Habitat Risk Assessment" "hra"

   !define COASTALBLUECARBON "${SMPATH}\Coastal Blue Carbon"

@@ -2,15 +2,18 @@
 # ---------------------
 # This file records the packages needed to build on readthedocs.org.
 # It assumes that the natcap.invest package is being
 # installed to the readthedocs virtualenv separately.
 channels:
   - conda-forge
-  - defaults
+  # set nodefaults to remove the default channels
+  # this is needed to keep memory use below RTD limit due to conda bug:
+  # https://github.com/conda/conda/issues/5003
+  - nodefaults
 dependencies:
   - chardet>=3.0.4
   - Cython
-  - GDAL>=3.1.2,!=3.3.0
+  - Flask
+  - GDAL>=3.1.2,<3.3.0
   - numpy>=1.11.0,!=1.16.0
   - pandas>=1.0,<1.2.0
   - pip
@@ -28,11 +31,10 @@ dependencies:
   - xlrd>=1.2.0
   - xlwt
   - pip:
-    - pygeoprocessing>=2.1.1  # pip-only
+    - pygeoprocessing>=2.3.2  # pip-only
     - PyInstaller==3.5  # pip-only
     - Pyro4==4.77  # pip-only
     - PySide2!=5.15.0  # pip-only
     - qtawesome  # pip-only
     - setuptools_scm
     - sphinx-rtd-theme
@@ -12,14 +12,14 @@
 # scripts/convert-requirements-to-conda-yml.py as though it can only be found
 # on pip.

-GDAL>=3.1.2,<3.3.0
+GDAL>=3.1.2,!=3.3.0  # 3.3.0 had a bug that broke our windows builds: https://github.com/OSGeo/gdal/issues/3898
 Pyro4==4.77  # pip-only
 pandas>=1.2.1
 numpy>=1.11.0,!=1.16.0
 Rtree>=0.8.2,!=0.9.1
 Shapely>=1.7.1,<2.0.0
 scipy>=1.6.0  # pip-only
-pygeoprocessing>=2.1.1,<2.2.0  # pip-only
+pygeoprocessing>=2.3.2  # pip-only
 taskgraph[niced_processes]>=0.10.3  # pip-only
 psutil>=5.6.6
 chardet>=3.0.4

@@ -249,9 +249,9 @@ def main(user_args=None):
             'the console and (if running in headless mode) how much is '
             'written to the logfile.'))
     verbosity_group.add_argument(
-        '--debug', dest='log_level', default=logging.CRITICAL,
+        '--debug', dest='log_level', default=logging.ERROR,
         action='store_const', const=logging.DEBUG,
-        help='Enable debug logging. Alias for -vvvvv')
+        help='Enable debug logging. Alias for -vvvv')

     parser.add_argument(
         '-L', '--language', default='en', choices=['en', 'es'],
@@ -351,7 +351,7 @@ def main(user_args=None):
     # arguments.  Verbosity: the more v's the lower the logging threshold.
     # If --debug is used, the logging threshold is 10.
     # If the user goes lower than logging.DEBUG, default to logging.DEBUG.
-    log_level = min(args.log_level, logging.CRITICAL - (args.verbosity*10))
+    log_level = min(args.log_level, logging.ERROR - (args.verbosity*10))
     handler.setLevel(max(log_level, logging.DEBUG))  # don't go below DEBUG
     root_logger.addHandler(handler)
     LOGGER.info('Setting handler log level to %s', log_level)
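
A quick worked example of the new verbosity arithmetic, using the standard logging constants (DEBUG=10, INFO=20, WARNING=30, ERROR=40, CRITICAL=50)::

    import logging

    # With the new default of logging.ERROR (40), each -v lowers the
    # threshold by 10: -v -> WARNING, -vv -> INFO, -vvv -> DEBUG.
    for verbosity in range(5):
        log_level = logging.ERROR - verbosity * 10
        effective = max(log_level, logging.DEBUG)  # don't go below DEBUG
        print(verbosity, logging.getLevelName(effective))
    # 0 ERROR, 1 WARNING, 2 INFO, 3 DEBUG, 4 DEBUG

This is also why the ``--debug`` help string changed: under the old ``CRITICAL`` default it took five ``v``'s to reach DEBUG; under ``ERROR`` it takes four.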
@@ -462,6 +462,10 @@ def main(user_args=None):

             # We're deliberately not validating here because the user
             # can just call ``invest validate <datastack>`` to validate.
+            #
+            # Exceptions will already be logged to the logfile but will ALSO be
+            # written to stdout if this exception is uncaught.  This is by
+            # design.
             model_module.execute(parsed_datastack.args)

             # If we're running in a GUI (either through ``invest run`` or

@@ -1337,7 +1337,7 @@ def extract_bathymetry_along_ray(
     """
     bathy_values = []
     n_pts = int(math.ceil(ray_geometry.Length() / bathy_gt[1]))
-    shapely_line = shapely.wkb.loads(ray_geometry.ExportToWkb())
+    shapely_line = shapely.wkb.loads(bytes(ray_geometry.ExportToWkb()))
     points_along_ray = (
         shapely_line.interpolate(float(i) / n_pts, normalized=True)
         for i in range(n_pts + 1))  # +1 to get a point at the end
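
The ``bytes(...)`` wrapper in this and the following hunks goes with the GDAL 3.3.1 support noted in HISTORY.rst: newer GDAL bindings return a ``bytearray`` from ``ExportToWkb()``, which shapely's WKB reader rejects, while older GDAL already returns ``bytes``.  Wrapping in ``bytes()`` normalizes both cases.  A minimal sketch::

    from osgeo import ogr
    import shapely.wkb

    # GDAL >= 3.3 returns bytearray from ExportToWkb(); older GDAL
    # returns bytes.  bytes(...) is a no-op copy for the latter and a
    # conversion for the former.
    geometry = ogr.CreateGeometryFromWkt('POINT (1 2)')
    shapely_geom = shapely.wkb.loads(bytes(geometry.ExportToWkb()))
    print(shapely_geom.wkt)  # POINT (1 2)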

@@ -1654,7 +1654,7 @@ def calculate_surge_exposure(
                 "Could not transform feature from %s to spatial reference"
                 "system %s", shelf_contour_path, target_srs_wkt)
             continue
-        shapely_geom = shapely.wkb.loads(clipped_geometry.ExportToWkb())
+        shapely_geom = shapely.wkb.loads(bytes(clipped_geometry.ExportToWkb()))
         if shapely_geom.type == 'MultiLineString':
             shapely_geom_explode = [
                 shapely.geometry.LineString(x) for x in shapely_geom]
@@ -1676,7 +1676,7 @@ def calculate_surge_exposure(
     for point_feature in shore_point_layer:
         shore_id = point_feature.GetField(SHORE_ID_FIELD)
         point_geometry = point_feature.GetGeometryRef()
-        point_shapely = shapely.wkb.loads(point_geometry.ExportToWkb())
+        point_shapely = shapely.wkb.loads(bytes(point_geometry.ExportToWkb()))
         result[shore_id] = point_shapely.distance(shelf_shapely_union)

     with open(target_surge_pickle_path, 'wb') as pickle_file:
@@ -1981,7 +1981,7 @@ def search_for_habitat(
                 habitat_vector_path)
             continue
         shapely_geom = shapely.wkb.loads(
-            clipped_geometry.ExportToWkb())
+            bytes(clipped_geometry.ExportToWkb()))
         if shapely_geom.type == 'MultiPolygon':
             shapely_geom_explode = [
                 shapely.geometry.Polygon(x) for x in shapely_geom]
@@ -2020,7 +2020,7 @@ def search_for_habitat(

         point_feature_geometry = point_feature.GetGeometryRef()
         point_shapely = shapely.wkb.loads(
-            point_feature_geometry.ExportToWkb())
+            bytes(point_feature_geometry.ExportToWkb()))

         found_habitat_geoms = [g for g in habitat_rtree.query(
             point_shapely.buffer(search_radius))]
@@ -2135,7 +2135,7 @@ def calculate_geomorphology_exposure(
     for feature in geomorph_layer:
         geometry = feature.GetGeometryRef()
         shapely_geometry = shapely.wkb.loads(
-            geometry.ExportToWkb())
+            bytes(geometry.ExportToWkb()))
         # Store the rank field value as an attribute of a shapely geometry so
         # it's accessible when the geometry is returned from a STRtree query.
         shapely_geometry.rank = feature.GetField('RANK')
@@ -2178,7 +2178,7 @@ def calculate_geomorphology_exposure(
         shore_id = point_feature.GetField(SHORE_ID_FIELD)
         point_geom = point_feature.GetGeometryRef()
         poly_geom = point_geom.Buffer(search_radius)
-        poly_shapely = shapely.wkb.loads(poly_geom.ExportToWkb())
+        poly_shapely = shapely.wkb.loads(bytes(poly_geom.ExportToWkb()))
         found_geoms = [g for g in tree.query(poly_shapely)]
         # the tree.query returns geometries found in envelope of poly_shapely,
         # so follow-up with a check for intersection.
@@ -2810,7 +2810,7 @@ def _ogr_to_geometry_list(vector_path):
         feature_geometry = feature.GetGeometryRef()
         try:
             shapely_geometry = shapely.wkb.loads(
-                feature_geometry.ExportToWkb())
+                bytes(feature_geometry.ExportToWkb()))
         except shapely.errors.WKBReadingError:
             # In my experience a geometry that can't be loaded by shapely
             # is also a geometry that can't be fixed with the buffer(0) trick,
@@ -264,7 +264,7 @@ _EXPECTED_NUTRIENT_TABLE_HEADERS = [
     'Riboflavin', 'Niacin', 'Pantothenic', 'VitB6', 'Folate', 'VitB12',
     'VitK']
 _EXPECTED_LUCODE_TABLE_HEADER = 'lucode'
-_NODATA_YIELD = -1.0
+_NODATA_YIELD = -1


 def execute(args):
@@ -333,7 +333,7 @@ def execute(args):
     landcover_raster_info = pygeoprocessing.get_raster_info(
         args['landcover_raster_path'])
     pixel_area_ha = numpy.product([
-        abs(x) for x in landcover_raster_info['pixel_size']]) / 10000.0
+        abs(x) for x in landcover_raster_info['pixel_size']]) / 10000
     landcover_nodata = landcover_raster_info['nodata'][0]
     if landcover_nodata is None:
         LOGGER.warning(
@@ -412,8 +412,12 @@ def execute(args):
             (bin_id,
              crop_climate_percentile_table[bin_id][yield_percentile_id])
             for bin_id in crop_climate_percentile_table])
+        # reclassify nodata to a valid value of 0
+        # we're assuming that the crop doesn't exist where there is no data
+        # this is more likely than assuming the crop does exist, esp.
+        # in the context of the provided climate bins map
         bin_to_percentile_yield[
-            crop_climate_bin_raster_info['nodata'][0]] = 0.0
+            crop_climate_bin_raster_info['nodata'][0]] = 0
         coarse_yield_percentile_raster_path = os.path.join(
             output_dir,
             _COARSE_YIELD_PERCENTILE_FILE_PATTERN % (
@@ -456,14 +460,13 @@ def execute(args):
                 crop_name, yield_percentile_id, file_suffix))

         create_percentile_production_task = task_graph.add_task(
-            func=pygeoprocessing.raster_calculator,
-            args=([(args['landcover_raster_path'], 1),
-                   (interpolated_yield_percentile_raster_path, 1),
-                   (landcover_nodata, 'raw'), (crop_lucode, 'raw'),
-                   (pixel_area_ha, 'raw')],
-                  _crop_production_op,
-                  percentile_crop_production_raster_path,
-                  gdal.GDT_Float32, _NODATA_YIELD),
+            func=calculate_crop_production,
+            args=(
+                args['landcover_raster_path'],
+                interpolated_yield_percentile_raster_path,
+                crop_lucode,
+                pixel_area_ha,
+                percentile_crop_production_raster_path),
             target_path_list=[percentile_crop_production_raster_path],
             dependent_task_list=[
                 create_interpolated_yield_percentile_task],
@@ -587,32 +590,63 @@ def execute(args):
     task_graph.join()


-def _crop_production_op(
-        lulc_array, yield_array, landcover_nodata, crop_lucode, pixel_area_ha):
-    """Mask in yields that overlap with `crop_lucode`.
+def calculate_crop_production(lulc_path, yield_path, crop_lucode,
+                              pixel_area_ha, target_path):
+    """Calculate crop production for a particular crop.
+
+    The resulting production value is:
+
+    - nodata, where either the LULC or yield input has nodata
+    - 0, where the LULC does not match the given LULC code
+    - yield * pixel area, where the given LULC code exists

     Args:
-        lulc_array (numpy.ndarray): landcover raster values
-        yield_array (numpy.ndarray): interpolated yield raster values
-        landcover_nodata (float): extracted from landcover raster values
-        crop_lucode (int): code used to mask in the current crop
-        pixel_area_ha (float): area of lulc raster cells (hectares)
+        lulc_path (str): path to a raster of LULC codes
+        yield_path (str): path of a raster of yields for the crop identified
+            by ``crop_lucode``, in units per hectare
+        crop_lucode (int): LULC code that identifies the crop of interest in
+            the ``lulc_path`` raster.
+        pixel_area_ha (number): Pixel area in hectares for both input rasters
+        target_path (str): Path to write the output crop production raster

     Returns:
-        numpy.ndarray with float values of yields for the current crop
-
+        None
     """
-    result = numpy.empty(lulc_array.shape, dtype=numpy.float32)
-    if landcover_nodata is not None:
-        result[:] = _NODATA_YIELD
-        valid_mask = ~numpy.isclose(lulc_array, landcover_nodata)
-        result[valid_mask] = 0.0
-    else:
-        result[:] = 0.0
-    lulc_mask = lulc_array == crop_lucode
-    result[lulc_mask] = (
-        yield_array[lulc_mask] * pixel_area_ha)
-    return result
+    lulc_nodata = pygeoprocessing.get_raster_info(lulc_path)['nodata'][0]
+    yield_nodata = pygeoprocessing.get_raster_info(yield_path)['nodata'][0]
+
+    def _crop_production_op(lulc_array, yield_array):
+        """Mask in yields that overlap with `crop_lucode`.
+
+        Args:
+            lulc_array (numpy.ndarray): landcover raster values
+            yield_array (numpy.ndarray): interpolated yield raster values
+
+        Returns:
+            numpy.ndarray with float values of yields for the current crop
+        """
+        result = numpy.full(lulc_array.shape, _NODATA_YIELD,
+                            dtype=numpy.float32)
+
+        valid_mask = numpy.full(lulc_array.shape, True)
+        if lulc_nodata is not None:
+            valid_mask &= ~numpy.isclose(lulc_array, lulc_nodata)
+        if yield_nodata is not None:
+            valid_mask &= ~numpy.isclose(yield_array, yield_nodata)
+        result[valid_mask] = 0
+
+        lulc_mask = lulc_array == crop_lucode
+        result[valid_mask & lulc_mask] = (
+            yield_array[valid_mask & lulc_mask] * pixel_area_ha)
+        return result
+
+    pygeoprocessing.raster_calculator(
+        [(lulc_path, 1), (yield_path, 1)],
+        _crop_production_op,
+        target_path,
+        gdal.GDT_Float32, _NODATA_YIELD)


 def _zero_observed_yield_op(observed_yield_array, observed_yield_nodata):
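
The refactor above moves nodata lookup out of the per-block operation: ``calculate_crop_production`` reads each raster's nodata once, and the nested ``_crop_production_op`` closes over those values, so ``raster_calculator`` only has to stream the two raster bands.  A small sketch of the same masking logic on in-memory arrays, with made-up values::

    import numpy

    _NODATA_YIELD = -1
    # made-up nodata codes, crop code, and pixel area
    lulc_nodata, yield_nodata, crop_lucode, pixel_area_ha = 255, -9999, 12, 0.81

    lulc_array = numpy.array([[12, 3, 255]])
    yield_array = numpy.array([[2.5, 2.5, -9999.0]])

    result = numpy.full(lulc_array.shape, _NODATA_YIELD, dtype=numpy.float32)
    valid_mask = (~numpy.isclose(lulc_array, lulc_nodata)
                  & ~numpy.isclose(yield_array, yield_nodata))
    result[valid_mask] = 0  # valid pixels default to zero production
    lulc_mask = lulc_array == crop_lucode
    result[valid_mask & lulc_mask] = (
        yield_array[valid_mask & lulc_mask] * pixel_area_ha)
    print(result)  # [[2.025  0.  -1.]]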
@@ -628,7 +662,7 @@ def _zero_observed_yield_op(observed_yield_array, observed_yield_nodata):
     """
     result = numpy.empty(
         observed_yield_array.shape, dtype=numpy.float32)
-    result[:] = 0.0
+    result[:] = 0
     valid_mask = slice(None)
     if observed_yield_nodata is not None:
         valid_mask = ~numpy.isclose(
@@ -658,9 +692,9 @@ def _mask_observed_yield_op(
     if landcover_nodata is not None:
         result[:] = observed_yield_nodata
         valid_mask = ~numpy.isclose(lulc_array, landcover_nodata)
-        result[valid_mask] = 0.0
+        result[valid_mask] = 0
     else:
-        result[:] = 0.0
+        result[:] = 0
     lulc_mask = lulc_array == crop_lucode
     result[lulc_mask] = (
         observed_yield_array[lulc_mask] * pixel_area_ha)
@@ -715,7 +749,7 @@ def tabulate_results(
         result_table.write(crop_name)
         production_lookup = {}
         production_pixel_count = 0
-        yield_sum = 0.0
+        yield_sum = 0
         observed_production_raster_path = os.path.join(
             output_dir,
             _OBSERVED_PRODUCTION_FILE_PATTERN % (
@@ -735,7 +769,7 @@ def tabulate_results(
             valid_mask = ~numpy.isclose(
                 yield_block, observed_yield_nodata)
             production_pixel_count += numpy.count_nonzero(
-                valid_mask & (yield_block > 0.0))
+                valid_mask & (yield_block > 0))
             yield_sum += numpy.sum(yield_block[valid_mask])
         production_area = production_pixel_count * pixel_area_ha
         production_lookup['observed'] = yield_sum
@@ -747,7 +781,7 @@ def tabulate_results(
             output_dir,
             _PERCENTILE_CROP_PRODUCTION_FILE_PATTERN % (
                 crop_name, yield_percentile_id, file_suffix))
-        yield_sum = 0.0
+        yield_sum = 0
         for _, yield_block in pygeoprocessing.iterblocks(
                 (yield_percentile_raster_path, 1)):
             # _NODATA_YIELD will always have a value (defined above)
@@ -759,7 +793,7 @@ def tabulate_results(

         # convert 100g to Mg and fraction left over from refuse
         nutrient_factor = 1e4 * (
-            1.0 - nutrient_table[crop_name]['Percentrefuse'] / 100.0)
+            1 - nutrient_table[crop_name]['Percentrefuse'] / 100)
         for nutrient_id in _EXPECTED_NUTRIENT_TABLE_HEADERS:
             for yield_percentile_id in sorted(yield_percentile_headers):
                 total_nutrient = (
@@ -774,7 +808,7 @@ def tabulate_results(
                     nutrient_table[crop_name][nutrient_id]))
         result_table.write('\n')

-    total_area = 0.0
+    total_area = 0
     for _, band_values in pygeoprocessing.iterblocks(
             (landcover_raster_path, 1)):
         if landcover_nodata is not None:
@@ -832,7 +866,7 @@ def aggregate_to_polygons(
     for crop_name in crop_to_landcover_table:
         # convert 100g to Mg and fraction left over from refuse
         nutrient_factor = 1e4 * (
-            1.0 - nutrient_table[crop_name]['Percentrefuse'] / 100.0)
+            1 - nutrient_table[crop_name]['Percentrefuse'] / 100)
         # loop over percentiles
         for yield_percentile_id in yield_percentile_headers:
             percentile_crop_production_raster_path = os.path.join(
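
The nutrient factor seen in these hunks converts production in metric tons (Mg) into the 100 g portions the nutrient table is expressed in, discounting refuse: 1 Mg = 10,000 x 100 g, hence the ``1e4``.  A worked example with made-up numbers::

    # If 15% of the harvest is refuse (made-up value), only 85% of each
    # 100 g portion carries nutrients.
    percent_refuse = 15
    nutrient_factor = 1e4 * (1 - percent_refuse / 100)  # 8500.0

    production_mg = 2.0   # made-up crop production in Mg
    per_100g = 0.3        # made-up nutrient content per 100 g
    print(production_mg * nutrient_factor * per_100g)  # 5100.0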

@@ -250,7 +250,7 @@ _EXPECTED_NUTRIENT_TABLE_HEADERS = [
     'Riboflavin', 'Niacin', 'Pantothenic', 'VitB6', 'Folate', 'VitB12',
     'VitK']
 _EXPECTED_LUCODE_TABLE_HEADER = 'lucode'
-_NODATA_YIELD = -1.0
+_NODATA_YIELD = -1


 def execute(args):
@@ -359,7 +359,7 @@ def execute(args):
     landcover_raster_info = pygeoprocessing.get_raster_info(
         args['landcover_raster_path'])
     pixel_area_ha = numpy.product([
-        abs(x) for x in landcover_raster_info['pixel_size']]) / 10000.0
+        abs(x) for x in landcover_raster_info['pixel_size']]) / 10000
     landcover_nodata = landcover_raster_info['nodata'][0]
     if landcover_nodata is None:
         LOGGER.warning(
@@ -411,7 +411,7 @@ def execute(args):
     for bin_id in crop_regression_table:
         for header in _EXPECTED_REGRESSION_TABLE_HEADERS:
             if crop_regression_table[bin_id][header.lower()] == '':
-                crop_regression_table[bin_id][header.lower()] = 0.0
+                crop_regression_table[bin_id][header.lower()] = 0

     yield_regression_headers = [
         x for x in list(crop_regression_table.values())[0]
@@ -436,8 +436,12 @@ def execute(args):
             (bin_id,
              crop_regression_table[bin_id][yield_regression_id])
             for bin_id in crop_regression_table])
+        # reclassify nodata to a valid value of 0
+        # we're assuming that the crop doesn't exist where there is no data
+        # this is more likely than assuming the crop does exist, esp.
+        # in the context of the provided climate bins map
         bin_to_regression_value[
-            crop_climate_bin_raster_info['nodata'][0]] = 0.0
+            crop_climate_bin_raster_info['nodata'][0]] = 0
         coarse_regression_parameter_raster_path = os.path.join(
             output_dir,
             _COARSE_YIELD_REGRESSION_PARAMETER_FILE_PATTERN % (
@@ -680,6 +684,7 @@ def _x_yield_op(
     result = numpy.empty(b_x.shape, dtype=numpy.float32)
     result[:] = _NODATA_YIELD
     valid_mask = (
+        (y_max != _NODATA_YIELD) &
         (b_x != _NODATA_YIELD) & (c_x != _NODATA_YIELD) &
         (lulc_array == crop_lucode))
     result[valid_mask] = pixel_area_ha * y_max[valid_mask] * (
@@ -716,7 +721,7 @@ def _zero_observed_yield_op(observed_yield_array, observed_yield_nodata):
     """
     result = numpy.empty(
         observed_yield_array.shape, dtype=numpy.float32)
-    result[:] = 0.0
+    result[:] = 0
     valid_mask = slice(None)
     if observed_yield_nodata is not None:
         valid_mask = ~numpy.isclose(
@@ -746,9 +751,9 @@ def _mask_observed_yield_op(
     if landcover_nodata is not None:
         result[:] = observed_yield_nodata
         valid_mask = ~numpy.isclose(lulc_array, landcover_nodata)
-        result[valid_mask] = 0.0
+        result[valid_mask] = 0
     else:
-        result[:] = 0.0
+        result[:] = 0
     lulc_mask = lulc_array == crop_lucode
     result[lulc_mask] = (
         observed_yield_array[lulc_mask] * pixel_area_ha)
@@ -791,7 +796,7 @@ def tabulate_regression_results(
         result_table.write(crop_name)
         production_lookup = {}
         production_pixel_count = 0
-        yield_sum = 0.0
+        yield_sum = 0
         observed_production_raster_path = os.path.join(
             output_dir,
             _OBSERVED_PRODUCTION_FILE_PATTERN % (
@@ -821,7 +826,7 @@ def tabulate_regression_results(
         crop_production_raster_path = os.path.join(
             output_dir, _CROP_PRODUCTION_FILE_PATTERN % (
                 crop_name, file_suffix))
-        yield_sum = 0.0
+        yield_sum = 0
         for _, yield_block in pygeoprocessing.iterblocks(
                 (crop_production_raster_path, 1)):
             yield_sum += numpy.sum(
@@ -832,7 +837,7 @@ def tabulate_regression_results(

         # convert 100g to Mg and fraction left over from refuse
         nutrient_factor = 1e4 * (
-            1.0 - nutrient_table[crop_name]['Percentrefuse'] / 100.0)
+            1 - nutrient_table[crop_name]['Percentrefuse'] / 100)
         for nutrient_id in _EXPECTED_NUTRIENT_TABLE_HEADERS:
             total_nutrient = (
                 nutrient_factor *
@@ -846,7 +851,7 @@ def tabulate_regression_results(
                 nutrient_table[crop_name][nutrient_id]))
         result_table.write('\n')

-    total_area = 0.0
+    total_area = 0
     for _, band_values in pygeoprocessing.iterblocks(
             (landcover_raster_path, 1)):
         if landcover_nodata is not None:
@@ -901,7 +906,7 @@ def aggregate_regression_results_to_polygons(
     for crop_name in crop_to_landcover_table:
         # convert 100g to Mg and fraction left over from refuse
         nutrient_factor = 1e4 * (
-            1.0 - nutrient_table[crop_name]['Percentrefuse'] / 100.0)
+            1 - nutrient_table[crop_name]['Percentrefuse'] / 100)
         LOGGER.info(
             "Calculating zonal stats for %s", crop_name)
         crop_production_raster_path = os.path.join(
@@ -387,7 +387,8 @@ def check_geometries(outlet_vector_path, dem_path, target_vector_path,
         original_geometry = feature.GetGeometryRef()

         try:
-            shapely_geom = shapely.wkb.loads(original_geometry.ExportToWkb())
+            shapely_geom = shapely.wkb.loads(
+                bytes(original_geometry.ExportToWkb()))

             # The classic bowtie polygons will load but require a separate
             # check for validity.
@@ -530,7 +531,7 @@ def snap_points_to_nearest_stream(points_vector_path, stream_raster_path_band,
             snapped_layer.CreateFeature(new_feature)
             continue

-        point = shapely.wkb.loads(source_geometry.ExportToWkb())
+        point = shapely.wkb.loads(bytes(source_geometry.ExportToWkb()))
         if geom_name == 'MULTIPOINT':
             # We already checked (above) that there's only one component point
             point = point.geoms[0]
@@ -1390,7 +1390,7 @@ def _get_vector_geometries_by_field(
     for feat in base_layer:
         field_value = feat.GetField(field_name)
         geom = feat.GetGeometryRef()
-        geom_wkb = shapely.wkb.loads(geom.ExportToWkb())
+        geom_wkb = shapely.wkb.loads(bytes(geom.ExportToWkb()))
         if field_value is None:
             base_vector = None
             base_layer = None
@@ -2612,8 +2612,9 @@ def _get_criteria_dataframe(base_criteria_table_path):
     file_ext = os.path.splitext(base_criteria_table_path)[1].lower()
     if file_ext == '.csv':
         # use sep=None, engine='python' to infer what the separator is
-        criteria_df = pandas.read_csv(base_criteria_table_path,
-            index_col=0, header=None, sep=None, engine='python')
+        criteria_df = pandas.read_csv(
+            base_criteria_table_path, index_col=0, header=None, sep=None,
+            engine='python')
     elif file_ext in ['.xlsx', '.xls']:
         criteria_df = pandas.read_excel(base_criteria_table_path,
                                         index_col=0, header=None)
@@ -2922,6 +2923,10 @@ def _get_overlap_dataframe(criteria_df, habitat_names, stressor_attributes,
             # Values are always grouped in threes (rating, dq, weight)
             for idx in range(0, row_data.size, 3):
                 habitat = row_data.keys()[idx]
+                if habitat not in habitat_names:
+                    # This is how we ignore extra columns in the csv
+                    # like we have in the sample data for "Rating Instruction".
+                    break
                 rating = row_data[idx]
                 dq = row_data[idx + 1]
                 weight = row_data[idx + 2]
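
For reference, the criteria table walked by the loop above lays out one (rating, dq, weight) triple per habitat, so stepping the column index by three recovers each habitat's scores, and the new ``break`` stops at trailing annotation columns.  A toy sketch of the same indexing over a pandas row (column names are made up)::

    import pandas

    habitat_names = ['eelgrass', 'kelp']
    # One triple per habitat; the last column is the kind of extra
    # annotation the `break` skips.
    row_data = pandas.Series(
        [3, 2, 1, 2, 2, 2, 'note'],
        index=['eelgrass', 'dq1', 'weight1',
               'kelp', 'dq2', 'weight2', 'Rating Instruction'])
    for idx in range(0, row_data.size, 3):
        habitat = row_data.keys()[idx]
        if habitat not in habitat_names:
            break  # ignore extra columns such as "Rating Instruction"
        rating, dq, weight = row_data[idx], row_data[idx+1], row_data[idx+2]
        print(habitat, rating, dq, weight)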
@@ -203,6 +203,8 @@ _OUTPUT_BASE_FILES = {
     'watershed_results_ndr_path': 'watershed_results_ndr.shp',
 }

+INTERMEDIATE_DIR_NAME = 'intermediate_outputs'
+
 _INTERMEDIATE_BASE_FILES = {
     'ic_factor_path': 'ic_factor.tif',
     'load_n_path': 'load_n.tif',
@@ -383,8 +385,7 @@ def execute(args):
     # Load all the tables for preprocessing
     output_dir = os.path.join(args['workspace_dir'])
     intermediate_output_dir = os.path.join(
-        args['workspace_dir'], 'intermediate_outputs')
-    output_dir = os.path.join(args['workspace_dir'])
+        args['workspace_dir'], INTERMEDIATE_DIR_NAME)
     cache_dir = os.path.join(intermediate_output_dir, 'cache_dir')
     utils.make_directories([output_dir, intermediate_output_dir, cache_dir])

@@ -477,7 +478,8 @@ def execute(args):
         args=(
             (f_reg['flow_accumulation_path'], 1),
             (f_reg['flow_direction_path'], 1),
-            float(args['threshold_flow_accumulation']), f_reg['stream_path']),
+            float(args['threshold_flow_accumulation']),
+            f_reg['stream_path']),
         target_path_list=[f_reg['stream_path']],
         dependent_task_list=[flow_accum_task],
         task_name='stream extraction')
@@ -486,7 +488,7 @@ def execute(args):
         func=pygeoprocessing.calculate_slope,
         args=((f_reg['filled_dem_path'], 1), f_reg['slope_path']),
         target_path_list=[f_reg['slope_path']],
-        dependent_task_list=[stream_extraction_task],
+        dependent_task_list=[fill_pits_task],
         task_name='calculate slope')

     threshold_slope_task = task_graph.add_task(
@@ -539,7 +541,8 @@ def execute(args):
     d_dn_task = task_graph.add_task(
         func=pygeoprocessing.routing.distance_to_channel_mfd,
         args=(
-            (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1),
+            (f_reg['flow_direction_path'], 1),
+            (f_reg['stream_path'], 1),
             f_reg['d_dn_path']),
         kwargs={'weight_raster_path_band': (
             f_reg['s_factor_inverse_path'], 1)},
@@ -550,7 +553,8 @@ def execute(args):
     dist_to_channel_task = task_graph.add_task(
         func=pygeoprocessing.routing.distance_to_channel_mfd,
         args=(
-            (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1),
+            (f_reg['flow_direction_path'], 1),
+            (f_reg['stream_path'], 1),
             f_reg['dist_to_channel_path']),
         dependent_task_list=[stream_extraction_task],
         target_path_list=[f_reg['dist_to_channel_path']],
@@ -635,7 +639,8 @@ def execute(args):
     ndr_eff_task = task_graph.add_task(
         func=ndr_core.ndr_eff_calculation,
         args=(
-            f_reg['flow_direction_path'], f_reg['stream_path'], eff_path,
+            f_reg['flow_direction_path'],
+            f_reg['stream_path'], eff_path,
             crit_len_path, effective_retention_path),
         target_path_list=[effective_retention_path],
         dependent_task_list=[
@@ -1286,7 +1291,7 @@ def _calculate_sub_ndr(

     def _sub_ndr_op(dist_to_channel_array):
         """Calculate subsurface NDR."""
-        # nodata value from this ntermediate output should always be
+        # nodata value from this intermediate output should always be
         # defined by pygeoprocessing, not None
         valid_mask = ~numpy.isclose(
             dist_to_channel_array, dist_to_channel_nodata)
@@ -1320,10 +1325,11 @@ def _calculate_export(
         """Combine NDR and subsurface NDR."""
         # these intermediate outputs should always have defined nodata
         # values assigned by pygeoprocessing
-        valid_mask = ~(numpy.isclose(modified_load_array, load_nodata) |
-                       numpy.isclose(ndr_array, ndr_nodata) |
-                       numpy.isclose(modified_sub_load_array, subsurface_load_nodata) |
-                       numpy.isclose(sub_ndr_array, sub_ndr_nodata))
+        valid_mask = ~(
+            numpy.isclose(modified_load_array, load_nodata) |
+            numpy.isclose(ndr_array, ndr_nodata) |
+            numpy.isclose(modified_sub_load_array, subsurface_load_nodata) |
+            numpy.isclose(sub_ndr_array, sub_ndr_nodata))
         result = numpy.empty(valid_mask.shape, dtype=numpy.float32)
         result[:] = _TARGET_NODATA
         result[valid_mask] = (
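
One behavioral fix in this file, distinct from the line-wrapping: the slope task now depends on ``fill_pits_task`` rather than ``stream_extraction_task``, since ``calculate_slope`` reads ``filled_dem_path``, which the pit-filling task produces; the old dependency serialized the two needlessly.  A hedged sketch of the dependency pattern (directory and file paths are made up)::

    import pygeoprocessing
    import pygeoprocessing.routing
    import taskgraph

    task_graph = taskgraph.TaskGraph('taskgraph_cache', n_workers=-1)

    fill_pits_task = task_graph.add_task(
        func=pygeoprocessing.routing.fill_pits,
        args=(('dem.tif', 1), 'filled_dem.tif'),  # made-up paths
        target_path_list=['filled_dem.tif'],
        task_name='fill pits')

    # Slope only needs the filled DEM, so it can start as soon as
    # fill_pits_task finishes, in parallel with stream extraction.
    slope_task = task_graph.add_task(
        func=pygeoprocessing.calculate_slope,
        args=(('filled_dem.tif', 1), 'slope.tif'),
        target_path_list=['slope.tif'],
        dependent_task_list=[fill_pits_task],
        task_name='calculate slope')
    task_graph.join()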
@@ -10,10 +10,8 @@ The SDR method in this model is based on:
 import os
 import logging

-from osgeo import gdal
-from osgeo import ogr
+from osgeo import gdal, ogr
 import numpy
-
 import pygeoprocessing
 import pygeoprocessing.routing
 import taskgraph
@@ -179,7 +177,9 @@ _OUTPUT_BASE_FILES = {
     'stream_path': 'stream.tif',
     'usle_path': 'usle.tif',
     'watershed_results_sdr_path': 'watershed_results_sdr.shp',
-    }
+}

+INTERMEDIATE_DIR_NAME = 'intermediate_outputs'
+
 _INTERMEDIATE_BASE_FILES = {
     'cp_factor_path': 'cp.tif',
@@ -187,7 +187,6 @@ _INTERMEDIATE_BASE_FILES = {
     'd_dn_path': 'd_dn.tif',
     'd_up_bare_soil_path': 'd_up_bare_soil.tif',
     'd_up_path': 'd_up.tif',
-    'dem_offset_path': 'dem_offset.tif',
     'f_path': 'f.tif',
     'flow_accumulation_path': 'flow_accumulation.tif',
     'flow_direction_path': 'flow_direction.tif',
@@ -206,11 +205,10 @@ _INTERMEDIATE_BASE_FILES = {
     'w_accumulation_path': 'w_accumulation.tif',
     'w_bar_path': 'w_bar.tif',
     'w_path': 'w.tif',
-    'ws_factor_path': 'ws_factor.tif',
     'ws_inverse_path': 'ws_inverse.tif',
     'e_prime_path': 'e_prime.tif',
     'weighted_avg_aspect_path': 'weighted_avg_aspect.tif'
-    }
+}

 _TMP_BASE_FILES = {
     'aligned_dem_path': 'aligned_dem.tif',
@@ -218,7 +216,7 @@ _TMP_BASE_FILES = {
     'aligned_erodibility_path': 'aligned_erodibility.tif',
     'aligned_erosivity_path': 'aligned_erosivity.tif',
     'aligned_lulc_path': 'aligned_lulc.tif',
-    }
+}

 # Target nodata is for general rasters that are positive, and _IC_NODATA are
 # for rasters that are any range
@@ -290,7 +288,7 @@ def execute(args):
                     table_key, str(lulc_code), table[table_key]))

     intermediate_output_dir = os.path.join(
-        args['workspace_dir'], 'intermediate_outputs')
+        args['workspace_dir'], INTERMEDIATE_DIR_NAME)
     output_dir = os.path.join(args['workspace_dir'])
     churn_dir = os.path.join(
         intermediate_output_dir, 'churn_dir_not_for_humans')
@@ -342,7 +340,7 @@ def execute(args):
             'base_vector_path_list': (args['watersheds_path'],),
             'raster_align_index': 0,
             'vector_mask_options': vector_mask_options,
-            },
+        },
         target_path_list=aligned_list,
         task_name='align input rasters')

@@ -435,7 +433,8 @@ def execute(args):
         drainage_raster_path_task = (
             f_reg['stream_and_drainage_path'], drainage_task)
     else:
-        drainage_raster_path_task = (f_reg['stream_path'], stream_task)
+        drainage_raster_path_task = (
+            f_reg['stream_path'], stream_task)

     threshold_w_task = task_graph.add_task(
         func=_calculate_w,
@@ -1324,7 +1323,7 @@ def _generate_report(
             (sed_retention_path, 1), watershed_results_sdr_path),
         'sed_dep': pygeoprocessing.zonal_statistics(
             (sed_deposition_path, 1), watershed_results_sdr_path),
-        }
+    }

     for field_name in field_summaries:
         field_def = ogr.FieldDefn(field_name, ogr.OFTReal)
@@ -356,31 +356,65 @@ cdef class _ManagedRaster:
 def calculate_sediment_deposition(
         mfd_flow_direction_path, e_prime_path, f_path, sdr_path,
         target_sediment_deposition_path):
-    """Calculate sediment deposition layer
+    """Calculate sediment deposition layer.
+
+    This algorithm outputs both sediment deposition (r_i) and flux (f_i)::
+
+        r_i  =      dr_i  * (sum over j ∈ J of f_j * p(i,j)) + E'_i
+
+        f_i  = (1 - dr_i) * (sum over j ∈ J of f_j * p(i,j)) + E'_i
+
+
+                (sum over k ∈ K of SDR_k * p(i,k)) - SDR_i
+        dr_i = --------------------------------------------
+                               (1 - SDR_i)
+
+    where:
+
+    - ``p(i,j)`` is the proportion of flow from pixel ``i`` into pixel ``j``
+    - ``J`` is the set of pixels that are immediate upstream neighbors of
+      pixel ``i``
+    - ``K`` is the set of pixels that are immediate downstream neighbors of
+      pixel ``i``
+    - ``E'`` is ``USLE * (1 - SDR)``, the amount of sediment loss from pixel
+      ``i`` that doesn't reach a stream (``e_prime_path``)
+    - ``SDR`` is the sediment delivery ratio (``sdr_path``)
+
+    ``f_i`` is recursively defined in terms of ``i``'s upstream neighbors.
+    The algorithm begins from seed pixels that are local high points and so
+    have no upstream neighbors.  It works downstream from each seed pixel,
+    only adding a pixel to the stack when all its upstream neighbors are
+    already calculated.
+
+    Note that this function is designed to be used in the context of the SDR
+    model.  Because the algorithm is recursive upstream and downstream of each
+    pixel, nodata values in the SDR input would propagate along the flow path.
+    This case is not handled because we assume the SDR and flow dir inputs
+    will come from the SDR model and have nodata in the same places.

     Args:
         mfd_flow_direction_path (string): a path to a raster with
             pygeoprocessing.routing MFD flow direction values.
         e_prime_path (string): path to a raster that shows sources of
             sediment that wash off a pixel but do not reach the stream.
         f_path (string): path to a raster that shows the sediment flux
             on a pixel for sediment that does not reach the stream.
         sdr_path (string): path to Sediment Delivery Ratio raster.
         target_sediment_deposition_path (string): path to created raster that
             shows where the E' sources end up across the landscape.

     Returns:
         None.

     """
     LOGGER.info('Calculate sediment deposition')
-    cdef float sediment_deposition_nodata = -1.0
+    cdef float target_nodata = -1
     pygeoprocessing.new_raster_from_base(
         mfd_flow_direction_path, target_sediment_deposition_path,
-        gdal.GDT_Float32, [sediment_deposition_nodata])
+        gdal.GDT_Float32, [target_nodata])
     pygeoprocessing.new_raster_from_base(
         mfd_flow_direction_path, f_path,
-        gdal.GDT_Float32, [sediment_deposition_nodata])
+        gdal.GDT_Float32, [target_nodata])

     cdef _ManagedRaster mfd_flow_direction_raster = _ManagedRaster(
         mfd_flow_direction_path, 1, False)
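
To make the docstring's recurrence concrete, here is a plain-Python sketch of the per-pixel update the Cython loop performs once ``f_j_weighted_sum`` (flow-weighted upstream flux) and ``downstream_sdr_weighted_sum`` (flow-weighted SDR of downstream neighbors) are known; the real code streams these values from rasters::

    def deposition_update(sdr_i, e_prime_i, f_j_weighted_sum,
                          downstream_sdr_weighted_sum):
        """One pixel's r_i (deposition) and f_i (flux)."""
        # Clamp, matching the code: with coarse MFD flow directions the
        # downstream SDR sum can come out below SDR_i; treat it as equal.
        if downstream_sdr_weighted_sum < sdr_i:
            downstream_sdr_weighted_sum = sdr_i
        dr_i = (downstream_sdr_weighted_sum - sdr_i) / (1 - sdr_i)
        r_i = dr_i * (e_prime_i + f_j_weighted_sum)        # deposited here
        f_i = (1 - dr_i) * (e_prime_i + f_j_weighted_sum)  # continues downslope
        return r_i, f_i

    # made-up values: SDR_i=0.2, E'_i=1.0, upstream flux 0.5, downstream SDR 0.4
    print(deposition_update(0.2, 1.0, 0.5, 0.4))  # (0.375, 1.125)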
@@ -391,11 +425,19 @@ def calculate_sediment_deposition(
     cdef _ManagedRaster sediment_deposition_raster = _ManagedRaster(
         target_sediment_deposition_path, 1, True)

+    # given the pixel neighbor numbering system
+    #  3 2 1
+    #  4 x 0
+    #  5 6 7
+    # if a pixel `x` has a neighbor `n` in position `i`,
+    # then `n`'s neighbor in position `inflow_offsets[i]`
+    # is the original pixel `x`
     cdef int *inflow_offsets = [4, 5, 6, 7, 0, 1, 2, 3]

     cdef int n_cols, n_rows
     flow_dir_info = pygeoprocessing.get_raster_info(mfd_flow_direction_path)
     n_cols, n_rows = flow_dir_info['raster_size']
+    cdef int mfd_nodata = 0
     cdef stack[int] processing_stack
     cdef float sdr_nodata = pygeoprocessing.get_raster_info(
         sdr_path)['nodata'][0]
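
The MFD flow direction raster packs eight 4-bit weights into one 32-bit integer, one nibble per neighbor direction; ``inflow_offsets`` maps a direction to its reverse, so a pixel can ask whether a neighbor flows back into it.  A sketch of the unpacking arithmetic on a made-up value::

    # Neighbor numbering (same as the comment above):
    #   3 2 1
    #   4 x 0
    #   5 6 7
    inflow_offsets = [4, 5, 6, 7, 0, 1, 2, 3]

    # Made-up MFD value: weight 3 toward direction 0 (east) and
    # weight 5 toward direction 2 (north), zero elsewhere.
    flow_val = (3 << (0 * 4)) | (5 << (2 * 4))

    weights = [(flow_val >> (j * 4)) & 0xF for j in range(8)]
    flow_sum = sum(weights)
    print(weights)                             # [3, 0, 5, 0, 0, 0, 0, 0]
    print(weights[0] / flow_sum)               # p(i, east) = 0.375
    # A neighbor n in direction j flows into x iff
    # (n_flow_val >> (inflow_offsets[j] * 4)) & 0xF > 0.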
@@ -419,49 +461,58 @@ def calculate_sediment_deposition(
             xoff = offset_dict['xoff']
             yoff = offset_dict['yoff']

-            LOGGER.info('Sediment deposition %.2f%% complete', 100.0 * (
+            LOGGER.info('Sediment deposition %.2f%% complete', 100 * (
                 (xoff * yoff) / float(n_cols*n_rows)))

             for row_index in range(win_ysize):
                 seed_row = yoff + row_index
                 for col_index in range(win_xsize):
                     seed_col = xoff + col_index
-                    # search to see if this is a good seed
-                    if mfd_flow_direction_raster.get(seed_col, seed_row) == 0:
+                    # check if this is a good seed pixel (a local high point)
+                    if mfd_flow_direction_raster.get(seed_col, seed_row) == mfd_nodata:
                         continue
                     seed_pixel = 1
+                    # iterate over each of the pixel's neighbors
                     for j in range(8):
+                        # skip if the neighbor is outside the raster bounds
                         neighbor_row = seed_row + ROW_OFFSETS[j]
                         if neighbor_row < 0 or neighbor_row >= n_rows:
                             continue
                         neighbor_col = seed_col + COL_OFFSETS[j]
                         if neighbor_col < 0 or neighbor_col >= n_cols:
                             continue
+                        # skip if the neighbor's flow direction is undefined
                         neighbor_flow_val = <int>mfd_flow_direction_raster.get(
                             neighbor_col, neighbor_row)
-                        if neighbor_flow_val == 0:
+                        if neighbor_flow_val == mfd_nodata:
                             continue
+                        # if the neighbor flows into it, it's not a local high
+                        # point and so can't be a seed pixel
                         neighbor_flow_weight = (
                             neighbor_flow_val >> (inflow_offsets[j]*4)) & 0xF
                         if neighbor_flow_weight > 0:
-                            # neighbor flows in, not a seed
-                            seed_pixel = 0
+                            seed_pixel = 0  # neighbor flows in, not a seed
                             break
-                    if seed_pixel and (
-                            sediment_deposition_raster.get(
-                                seed_col, seed_row) ==
-                            sediment_deposition_nodata):
+
+                    # if this can be a seed pixel and hasn't already been
+                    # calculated, put it on the stack
+                    if seed_pixel and sediment_deposition_raster.get(
+                            seed_col, seed_row) == target_nodata:
                         processing_stack.push(seed_row * n_cols + seed_col)

                     while processing_stack.size() > 0:
                         # loop invariant: cell has all upstream neighbors
-                        # processed
+                        # processed.  this is true for seed pixels because they
+                        # have no upstream neighbors.
                         flat_index = processing_stack.top()
                         processing_stack.pop()
                         global_row = flat_index // n_cols
                         global_col = flat_index % n_cols

-                        # calculate the upstream Fj contribution to this pixel
-                        # (sum over j ∈ J of f_j * p(i,j) in the equation for r_i)
+                        # calculate the upstream f_j contribution to this pixel,
+                        # the weighted sum of flux flowing onto this pixel from
+                        # all neighbors
                         f_j_weighted_sum = 0
                         for j in range(8):
                             neighbor_row = global_row + ROW_OFFSETS[j]
@@ -471,7 +522,8 @@ def calculate_sediment_deposition(
                             if neighbor_col < 0 or neighbor_col >= n_cols:
                                 continue

-                            # see if there's an inflow
+                            # see if there's an inflow from the neighbor to the
+                            # pixel
                             neighbor_flow_val = (
                                 <int>mfd_flow_direction_raster.get(
                                     neighbor_col, neighbor_row))
@@ -479,54 +531,72 @@ def calculate_sediment_deposition(
                                 neighbor_flow_val >> (inflow_offsets[j]*4)) & 0xF
                             if neighbor_flow_weight > 0:
                                 f_j = f_raster.get(neighbor_col, neighbor_row)
+                                if f_j == target_nodata:
+                                    continue
+                                # sum up the neighbor's flow dir values in each
+                                # direction.
+                                # flow dir values are relative to the total
                                 neighbor_flow_sum = 0
                                 for k in range(8):
                                     neighbor_flow_sum += (
                                         neighbor_flow_val >> (k*4)) & 0xF
+                                # get the proportion of the neighbor's flow that
+                                # flows into the original pixel
                                 p_val = neighbor_flow_weight / neighbor_flow_sum
+                                # add the neighbor's flux value, weighted by the
+                                # flow proportion
                                 f_j_weighted_sum += p_val * f_j

-                        # calculate the differential downstream change in sdr
-                        # from this pixel
-                        downstream_sdr_weighted_sum = 0.0
+                        # calculate sum of SDR values of immediate downstream
+                        # neighbors, weighted by proportion of flow into each
+                        # neighbor
+                        # (sum over k ∈ K of SDR_k * p(i,k) in the equation above)
+                        downstream_sdr_weighted_sum = 0
                         flow_val = <int>mfd_flow_direction_raster.get(
                             global_col, global_row)
-                        flow_sum = 0.0
+                        flow_sum = 0
                         for k in range(8):
                             flow_sum += (flow_val >> (k*4)) & 0xF

+                        # iterate over the neighbors again
                         for j in range(8):
+                            # skip if neighbor is outside the raster boundaries
                             neighbor_row = global_row + ROW_OFFSETS[j]
                             if neighbor_row < 0 or neighbor_row >= n_rows:
                                 continue
                             neighbor_col = global_col + COL_OFFSETS[j]
                             if neighbor_col < 0 or neighbor_col >= n_cols:
                                 continue
-                            # if this direction flows out, add to weighted sum
+                            # if it is a downstream neighbor, add to the sum and
+                            # check if it can be pushed onto the stack yet
                             flow_weight = (flow_val >> (j*4)) & 0xF
                             if flow_weight > 0:
                                 sdr_j = sdr_raster.get(neighbor_col, neighbor_row)
-                                if sdr_j == 0.0:
+                                if sdr_j == sdr_nodata:
+                                    continue
+                                if sdr_j == 0:
                                     # this means it's a stream, for SDR deposition
                                     # purposes, we set sdr to 1 to indicate this
                                     # is the last step on which to retain sediment
-                                    sdr_j = 1.0
-                                if sdr_j == sdr_nodata:
-                                    sdr_j = 0.0
+                                    sdr_j = 1
                                 p_j = flow_weight / flow_sum
                                 downstream_sdr_weighted_sum += sdr_j * p_j

+                                # check if we can add neighbor j to the stack yet
+                                #
                                 # if there is a downstream neighbor it
                                 # couldn't have been pushed on the processing
                                 # stack yet, because the upstream was just
                                 # completed
                                 upstream_neighbors_processed = 1
+                                # iterate over each neighbor-of-neighbor
                                 for k in range(8):
-                                    # no need to push the one we're currently
-                                    # calculating back onto the stack
                                     if inflow_offsets[k] == j:
+                                        # we don't need to process the one
+                                        # we're currently calculating
                                         continue
-                                    # see if there's an inflow
+                                    # skip if neighbor-of-neighbor is outside
+                                    # raster bounds
                                     ds_neighbor_row = (
                                         neighbor_row + ROW_OFFSETS[k])
                                     if ds_neighbor_row < 0 or ds_neighbor_row >= n_rows:
@@ -535,40 +605,46 @@ def calculate_sediment_deposition(
                                         neighbor_col + COL_OFFSETS[k])
                                     if ds_neighbor_col < 0 or ds_neighbor_col >= n_cols:
                                         continue
+
+                                    # if any upstream neighbor of j hasn't been
+                                    # calculated, we can't push j onto the stack
+                                    # yet
                                     ds_neighbor_flow_val = (
                                         <int>mfd_flow_direction_raster.get(
                                             ds_neighbor_col, ds_neighbor_row))
                                     if (ds_neighbor_flow_val >> (
                                             inflow_offsets[k]*4)) & 0xF > 0:
-                                        if sediment_deposition_raster.get(
-                                                ds_neighbor_col,
-                                                ds_neighbor_row) == (
-                                                    sediment_deposition_nodata):
-                                            # can't push it because not
-                                            # processed yet
+                                        if (sediment_deposition_raster.get(
+                                                ds_neighbor_col, ds_neighbor_row) ==
+                                                target_nodata):
                                             upstream_neighbors_processed = 0
                                             break
+                                # if all upstream neighbors of neighbor j are
+                                # processed, we can push j onto the stack.
                                 if upstream_neighbors_processed:
                                     processing_stack.push(
                                         neighbor_row * n_cols +
                                         neighbor_col)

+                        # nodata pixels should propagate to the results
                         sdr_i = sdr_raster.get(global_col, global_row)
                         if sdr_i == sdr_nodata:
-                            sdr_i = 0.0
+                            continue
                         e_prime_i = e_prime_raster.get(global_col, global_row)
                         if e_prime_i == e_prime_nodata:
-                            e_prime_i = 0.0
+                            continue

                         if downstream_sdr_weighted_sum < sdr_i:
                             # i think this happens because of our low resolution
                             # flow direction, it's okay to zero out.
                             downstream_sdr_weighted_sum = sdr_i
-                        d_ri = (downstream_sdr_weighted_sum - sdr_i) / (1 - sdr_i)
-                        r_i = d_ri * (e_prime_i + f_j_weighted_sum)
-                        f_i = (1-d_ri) * (e_prime_i + f_j_weighted_sum)
-                        sediment_deposition_raster.set(
-                            global_col, global_row, r_i)
+
+                        # these correspond to the full equations for
+                        # dr_i, r_i, and f_i given in the docstring
+                        dr_i = (downstream_sdr_weighted_sum - sdr_i) / (1 - sdr_i)
+                        r_i = dr_i * (e_prime_i + f_j_weighted_sum)
+                        f_i = (1 - dr_i) * (e_prime_i + f_j_weighted_sum)
+
+                        sediment_deposition_raster.set(global_col, global_row, r_i)
                         f_raster.set(global_col, global_row, f_i)

     LOGGER.info('Sediment deposition 100% complete')
@@ -576,7 +652,7 @@ def calculate_sediment_deposition(


 def calculate_average_aspect(
-        mfd_flow_direction_path, target_average_aspect_path):
+        mfd_flow_direction_path, target_average_aspect_path):
     """Calculate the Weighted Average Aspect Ratio from MFD.

     Calculates the average aspect ratio weighted by proportional flow
@@ -594,7 +670,7 @@ def calculate_average_aspect(
     """
     LOGGER.info('Calculating average aspect')

-    cdef float average_aspect_nodata = -1.0
+    cdef float average_aspect_nodata = -1
     pygeoprocessing.new_raster_from_base(
         mfd_flow_direction_path, target_average_aspect_path,
         gdal.GDT_Float32, [average_aspect_nodata], [average_aspect_nodata])
|
|||
# the flow_lengths array is the functional equivalent
|
||||
# of calculating |sin(alpha)| + |cos(alpha)|.
|
||||
cdef float* flow_lengths = [
|
||||
1.0, <float>SQRT2,
|
||||
1.0, <float>SQRT2,
|
||||
1.0, <float>SQRT2,
|
||||
1.0, <float>SQRT2
|
||||
1, <float>SQRT2,
|
||||
1, <float>SQRT2,
|
||||
1, <float>SQRT2,
|
||||
1, <float>SQRT2
|
||||
]
|
||||
|
||||
# Loop over iterblocks to maintain cache locality
|
||||
|
@ -641,7 +717,7 @@ def calculate_average_aspect(
|
|||
xoff = offset_dict['xoff']
|
||||
yoff = offset_dict['yoff']
|
||||
|
||||
LOGGER.info('Average aspect %.2f%% complete', 100.0 * (
|
||||
LOGGER.info('Average aspect %.2f%% complete', 100 * (
|
||||
n_pixels_visited / float(n_cols * n_rows)))
|
||||
|
||||
for row_index in range(win_ysize):
|
||||
|
|
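
The ``flow_lengths`` table above hard-codes |sin(α)| + |cos(α)| for the eight MFD directions: α is a multiple of 45 degrees, so the value alternates between 1 for the four cardinal directions and √2 for the four diagonals.  A quick check::

    import math

    # Direction j points at angle j * 45 degrees; the result matches the
    # alternating 1, SQRT2 pattern in flow_lengths.
    for j in range(8):
        alpha = math.radians(j * 45)
        print(j, round(abs(math.sin(alpha)) + abs(math.cos(alpha)), 6))
    # 0 -> 1.0, 1 -> 1.414214, 2 -> 1.0, ...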
(File diff suppressed because it is too large.)
@ -15,7 +15,7 @@ class Executor(QtCore.QObject, threading.Thread):
|
|||
|
||||
finished = QtCore.Signal()
|
||||
|
||||
def __init__(self, target, args=None, kwargs=None):
|
||||
def __init__(self, target, args=None, kwargs=None, log_events=True):
|
||||
"""Initialize the Executor object.
|
||||
|
||||
Args:
|
||||
|
@ -27,6 +27,9 @@ class Executor(QtCore.QObject, threading.Thread):
|
|||
kwargs (dict): A dict mapping string parameter names to parameter
|
||||
values that should be passed to ``target``. If ``None``,
|
||||
keyword arguments will be ignored.
|
||||
log_events=True (bool): If ``True``, exceptions raised when calling
|
||||
``target`` as well as completion of ``target`` will
|
||||
be logged.
|
||||
|
||||
Returns:
|
||||
``None``.
|
||||
|
@ -34,6 +37,7 @@ class Executor(QtCore.QObject, threading.Thread):
|
|||
QtCore.QObject.__init__(self)
|
||||
threading.Thread.__init__(self)
|
||||
self.target = target
|
||||
self.log_events = log_events
|
||||
|
||||
if args is None:
|
||||
args = ()
|
||||
|
@@ -75,10 +79,17 @@ class Executor(QtCore.QObject, threading.Thread):
|
|||
# there was something else very strange going on.
|
||||
# Failed build:
|
||||
# http://builds.naturalcapitalproject.org/job/test-natcap.invest.ui/100/
|
||||
LOGGER.exception('Target %s failed with exception', self.target)
|
||||
|
||||
# When we're running a model, the exception is logged via
|
||||
# utils.prepare_workspace. But this thread is also used in the Qt
|
||||
# interface by validation and the datastack archive creation
|
||||
# function, and we for sure want to log exceptions in that case.
|
||||
if self.log_events:
|
||||
LOGGER.exception('Target %s failed with exception', self.target)
|
||||
self.failed = True
|
||||
self.exception = error
|
||||
self.traceback = traceback.format_exc()
|
||||
finally:
|
||||
LOGGER.info('Execution finished')
|
||||
if self.log_events:
|
||||
LOGGER.info('Execution finished')
|
||||
self.finished.emit()
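With the new ``log_events`` flag, a caller that already logs failures elsewhere (here, via ``utils.prepare_workspace``) can silence the Executor's own logging while still getting the captured exception. A hypothetical usage sketch (the ``risky`` target is made up):

    def risky():
        raise RuntimeError('boom')

    thread = Executor(risky, log_events=False)  # no duplicate log records
    thread.start()
    thread.join()
    if thread.failed:
        print(thread.traceback)  # exception details are captured regardless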
|
||||
|
|
|
@@ -2497,9 +2497,13 @@ class Form(QtWidgets.QWidget):
|
|||
if not hasattr(target, '__call__'):
|
||||
raise ValueError('Target %s must be callable' % target)
|
||||
|
||||
# Don't need to log an exception or completion in the Executor thread
|
||||
# in the case of a model run; those messages are handled by
|
||||
# utils.prepare_workspace.
|
||||
self._thread = execution.Executor(target,
|
||||
args,
|
||||
kwargs)
|
||||
kwargs,
|
||||
log_events=False)
|
||||
self._thread.finished.connect(self._run_finished)
|
||||
|
||||
self.run_dialog.show()
|
||||
|
|
|
@@ -692,8 +692,8 @@ class WindowTitle(QtCore.QObject):
|
|||
# that handle __setattr__ differently. In 3.7, the super()
|
||||
# implementation causes a `TypeError: can't apply this __setattr__
|
||||
# to object object` when running `invest run carbon`. In Python 3.8.4+
|
||||
# with the object implementation the error `TypeError: can't apply
|
||||
# this __setattr__ to Carbon object`
|
||||
# with the object implementation the error `TypeError: can't apply
|
||||
# this __setattr__ to Carbon object`
|
||||
if (sys.version_info.minor >= 8) and (sys.version_info.micro >= 4):
|
||||
super().__setattr__(name, value)
|
||||
else:
|
||||
|
@@ -1477,8 +1477,8 @@ class InVESTModel(QtWidgets.QMainWindow):
|
|||
# that handle __setattr__ differently. In 3.7, the super()
|
||||
# implementation causes a `TypeError: can't apply this __setattr__
|
||||
# to object object` when running `invest run carbon`. In Python 3.8.4+
|
||||
# with the object implementation the error `TypeError: can't apply
|
||||
# this __setattr__ to Carbon object`
|
||||
# with the object implementation the error `TypeError: can't apply
|
||||
# this __setattr__ to Carbon object`
|
||||
if (sys.version_info.minor >= 8) and (sys.version_info.micro >= 4):
|
||||
super().__setattr__(name, value)
|
||||
else:
|
||||
|
@@ -1645,15 +1645,9 @@ class InVESTModel(QtWidgets.QMainWindow):
|
|||
'Starting model with parameters: \n%s',
|
||||
datastack.format_args_dict(
|
||||
args, self.target.__module__))
|
||||
|
||||
try:
|
||||
return self.target(args=args)
|
||||
except Exception:
|
||||
LOGGER.exception('Exception while executing %s',
|
||||
self.target)
|
||||
raise
|
||||
finally:
|
||||
LOGGER.info('Execution finished')
|
||||
# Exceptions will be captured and logged in
|
||||
# utils.prepare_workspace.
|
||||
return self.target(args=args)
|
||||
|
||||
self.form.run(target=_logged_target,
|
||||
window_title='Running %s' % self.label,
|
||||
|
@@ -1853,7 +1847,7 @@ class InVESTModel(QtWidgets.QMainWindow):
|
|||
def _validate(new_value):
|
||||
# We want to validate the whole form; discard the individual value
|
||||
self.validate(block=False)
|
||||
|
||||
|
||||
# Set up quickrun options if we're doing a quickrun
|
||||
if quickrun:
|
||||
@QtCore.Slot()
|
||||
|
|
|
@@ -149,6 +149,7 @@ class SDR(model.InVESTModel):
|
|||
self.k_param.args_key: self.k_param.value(),
|
||||
self.ic_0_param.args_key: self.ic_0_param.value(),
|
||||
self.sdr_max.args_key: self.sdr_max.value(),
|
||||
self.l_max.args_key: self.l_max.value(),
|
||||
}
|
||||
|
||||
return args
|
||||
|
|
|
@@ -0,0 +1,100 @@
|
|||
from natcap.invest.ui import model, inputs
|
||||
import natcap.invest.stormwater
|
||||
|
||||
|
||||
class Stormwater(model.InVESTModel):
|
||||
def __init__(self):
|
||||
model.InVESTModel.__init__(
|
||||
self,
|
||||
label='InVEST Stormwater Model',
|
||||
target=natcap.invest.stormwater.execute,
|
||||
validator=natcap.invest.stormwater.validate,
|
||||
localdoc='stormwater.html')
|
||||
|
||||
self.lulc_path = inputs.File(
|
||||
args_key='lulc_path',
|
||||
helptext=(
|
||||
"A GDAL-supported raster representing land-cover."),
|
||||
label='Land Use/Land Cover',
|
||||
validator=self.validator)
|
||||
self.add_input(self.lulc_path)
|
||||
self.soil_group_path = inputs.File(
|
||||
args_key='soil_group_path',
|
||||
helptext=(
|
||||
"A GDAL-supported raster representing hydrologic soil groups."),
|
||||
label='Soil Groups',
|
||||
validator=self.validator)
|
||||
self.add_input(self.soil_group_path)
|
||||
self.precipitation_path = inputs.File(
|
||||
args_key='precipitation_path',
|
||||
helptext=(
|
||||
"A GDAL-supported raster showing annual precipitation amounts"),
|
||||
label='Precipitation',
|
||||
validator=self.validator)
|
||||
self.add_input(self.precipitation_path)
|
||||
self.biophysical_table = inputs.File(
|
||||
args_key='biophysical_table',
|
||||
helptext=(
|
||||
"A CSV file with runoff coefficient (RC), infiltration "
|
||||
"coefficient (IR), and pollutant event mean concentration "
|
||||
"(EMC) data for each LULC code."),
|
||||
label='Biophysical Table',
|
||||
validator=self.validator)
|
||||
self.add_input(self.biophysical_table)
|
||||
self.adjust_retention_ratios = inputs.Checkbox(
|
||||
args_key='adjust_retention_ratios',
|
||||
helptext=(
|
||||
'If checked, adjust retention ratios using road centerlines.'),
|
||||
label='Adjust Retention Ratios')
|
||||
self.add_input(self.adjust_retention_ratios)
|
||||
self.retention_radius = inputs.Text(
|
||||
args_key='retention_radius',
|
||||
helptext=('Radius within which to adjust retention ratios'),
|
||||
label='Retention Radius',
|
||||
validator=self.validator)
|
||||
self.add_input(self.retention_radius)
|
||||
self.road_centerlines_path = inputs.File(
|
||||
args_key='road_centerlines_path',
|
||||
helptext=('Polyline vector representing centerlines of roads'),
|
||||
label='Road Centerlines',
|
||||
validator=self.validator)
|
||||
self.add_input(self.road_centerlines_path)
|
||||
self.aggregate_areas_path = inputs.File(
|
||||
args_key='aggregate_areas_path',
|
||||
helptext=(
|
||||
'Polygon vector outlining area(s) of interest by which to '
|
||||
'aggregate results (typically watersheds or sewersheds).'),
|
||||
label='Aggregate Areas',
|
||||
validator=self.validator)
|
||||
self.add_input(self.aggregate_areas_path)
|
||||
self.replacement_cost = inputs.Text(
|
||||
args_key='replacement_cost',
|
||||
helptext=('Replacement cost of retention per cubic meter'),
|
||||
label='Replacement Cost',
|
||||
validator=self.validator)
|
||||
self.add_input(self.replacement_cost)
|
||||
|
||||
# retention_radius is active when adjust_retention_ratios is checked
|
||||
self.adjust_retention_ratios.sufficiency_changed.connect(
|
||||
self.retention_radius.set_interactive)
|
||||
self.adjust_retention_ratios.sufficiency_changed.connect(
|
||||
self.road_centerlines_path.set_interactive)
|
||||
|
||||
|
||||
def assemble_args(self):
|
||||
args = {
|
||||
self.workspace.args_key: self.workspace.value(),
|
||||
self.suffix.args_key: self.suffix.value(),
|
||||
self.lulc_path.args_key: self.lulc_path.value(),
|
||||
self.soil_group_path.args_key: self.soil_group_path.value(),
|
||||
self.precipitation_path.args_key: self.precipitation_path.value(),
|
||||
self.biophysical_table.args_key: self.biophysical_table.value(),
|
||||
self.adjust_retention_ratios.args_key: self.adjust_retention_ratios.value(),
|
||||
self.retention_radius.args_key: self.retention_radius.value(),
|
||||
self.road_centerlines_path.args_key: self.road_centerlines_path.value(),
|
||||
self.aggregate_areas_path.args_key: self.aggregate_areas_path.value(),
|
||||
self.replacement_cost.args_key: self.replacement_cost.value(),
|
||||
}
|
||||
|
||||
return args
|
||||
|
|
@@ -115,12 +115,17 @@ def _calculate_args_bounding_box(args, args_spec):
|
|||
# Using gdal.OpenEx to check if an input is spatial caused the
|
||||
# model to hang sometimes (possible race condition), so only
|
||||
# get the bounding box of inputs that are known to be spatial.
|
||||
# Also eliminate any string paths that are empty to prevent an
|
||||
# exception. By the time we've made it to this function, all paths
|
||||
# should already have been validated so the path is either valid or
|
||||
# blank.
|
||||
spatial_info = None
|
||||
if args_spec['args'][key]['type'] == 'raster':
|
||||
if args_spec['args'][key]['type'] == 'raster' and value.strip() != '':
|
||||
spatial_info = pygeoprocessing.get_raster_info(value)
|
||||
elif args_spec['args'][key]['type'] == 'vector':
|
||||
elif (args_spec['args'][key]['type'] == 'vector'
|
||||
and value.strip() != ''):
|
||||
spatial_info = pygeoprocessing.get_vector_info(value)
|
||||
|
||||
|
||||
if spatial_info:
|
||||
local_bb = spatial_info['bounding_box']
|
||||
projection_wkt = spatial_info['projection_wkt']
|
||||
|
@@ -181,7 +186,7 @@ def _log_exit_status(session_id, status):
|
|||
log_finish_url = json.loads(urlopen(
|
||||
_ENDPOINTS_INDEX_URL).read().strip())['FINISH']
|
||||
|
||||
# The data must be a python string of bytes. This will be ``bytes``
|
||||
# The data must be a python string of bytes. This will be ``bytes``
|
||||
# in python3.
|
||||
urlopen(Request(log_finish_url, urlencode(payload).encode('utf-8')))
|
||||
except Exception as exception:
|
||||
|
@@ -236,7 +241,7 @@ def _log_model(model_name, model_args, session_id=None):
|
|||
log_start_url = json.loads(urlopen(
|
||||
_ENDPOINTS_INDEX_URL).read().strip())['START']
|
||||
|
||||
# The data must be a python string of bytes. This will be ``bytes``
|
||||
# The data must be a python string of bytes. This will be ``bytes``
|
||||
# in python3.
|
||||
urlopen(Request(log_start_url, urlencode(payload).encode('utf-8')))
|
||||
except Exception as exception:
|
||||
|
|
|
@@ -529,7 +529,7 @@ def execute(args):
|
|||
(green_area_sum_raster_path, 1),
|
||||
(cc_park_raster_path, 1),
|
||||
(green_area_threshold, 'raw'),
|
||||
], hm_op, hm_raster_path, gdal.GDT_Float32, TARGET_NODATA),
|
||||
], hm_op, hm_raster_path, gdal.GDT_Float32, TARGET_NODATA),
|
||||
target_path_list=[hm_raster_path],
|
||||
dependent_task_list=[cc_task, green_area_sum_task, cc_park_task],
|
||||
task_name='calculate HM index')
|
||||
|
@@ -844,7 +844,8 @@ def calculate_uhi_result_vector(
|
|||
LOGGER.info('Parsing building footprint geometry')
|
||||
building_shapely_polygon_lookup = dict(
|
||||
(poly_feat.GetFID(),
|
||||
shapely.wkb.loads(poly_feat.GetGeometryRef().ExportToWkb()))
|
||||
shapely.wkb.loads(
|
||||
bytes(poly_feat.GetGeometryRef().ExportToWkb())))
|
||||
for poly_feat in energy_consumption_layer)
|
||||
|
||||
LOGGER.info("Constructing building footprint spatial index")
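This and the following hunks all wrap ``ExportToWkb()`` in ``bytes()``. Newer GDAL releases return a ``bytearray`` from ``ExportToWkb()``, which ``shapely.wkb.loads`` does not accept, while the cast is harmless on older GDAL that already returns ``bytes`` (the exact GDAL version cutoff is an assumption here). A standalone sketch:

    from osgeo import ogr
    import shapely.wkb

    geom = ogr.CreateGeometryFromWkt('POINT (1 2)')
    wkb_data = geom.ExportToWkb()                  # bytearray on newer GDAL
    shapely_geom = shapely.wkb.loads(bytes(wkb_data))  # bytes() accepts either
    print(shapely_geom.wkt)                        # POINT (1 2)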
|
||||
|
@@ -897,7 +898,7 @@ def calculate_uhi_result_vector(
|
|||
if energy_consumption_vector_path:
|
||||
aoi_geometry = feature.GetGeometryRef()
|
||||
aoi_shapely_geometry = shapely.wkb.loads(
|
||||
aoi_geometry.ExportToWkb())
|
||||
bytes(aoi_geometry.ExportToWkb()))
|
||||
aoi_shapely_geometry_prep = shapely.prepared.prep(
|
||||
aoi_shapely_geometry)
|
||||
avd_eng_cn = 0.0
|
||||
|
|
|
@@ -566,7 +566,7 @@ def _calculate_damage_to_infrastructure_in_aoi(
|
|||
continue
|
||||
|
||||
shapely_geometry = shapely.wkb.loads(
|
||||
infrastructure_geometry.ExportToWkb())
|
||||
bytes(infrastructure_geometry.ExportToWkb()))
|
||||
|
||||
structures_index.insert(
|
||||
infrastructure_feature.GetFID(), shapely_geometry.bounds)
|
||||
|
@@ -577,7 +577,8 @@ def _calculate_damage_to_infrastructure_in_aoi(
|
|||
aoi_damage = {}
|
||||
for aoi_feature in aoi_layer:
|
||||
aoi_geometry = aoi_feature.GetGeometryRef()
|
||||
aoi_geometry_shapely = shapely.wkb.loads(aoi_geometry.ExportToWkb())
|
||||
aoi_geometry_shapely = shapely.wkb.loads(
|
||||
bytes(aoi_geometry.ExportToWkb()))
|
||||
aoi_geometry_prep = shapely.prepared.prep(aoi_geometry_shapely)
|
||||
|
||||
total_damage = 0.0
|
||||
|
@@ -586,7 +587,7 @@ def _calculate_damage_to_infrastructure_in_aoi(
|
|||
infrastructure_feature = infrastructure_layer.GetFeature(
|
||||
infrastructure_fid)
|
||||
infrastructure_geometry = shapely.wkb.loads(
|
||||
infrastructure_feature.GetGeometryRef().ExportToWkb())
|
||||
bytes(infrastructure_feature.GetGeometryRef().ExportToWkb()))
|
||||
if aoi_geometry_prep.intersects(infrastructure_geometry):
|
||||
intersection_geometry = aoi_geometry_shapely.intersection(
|
||||
infrastructure_geometry)
|
||||
|
|
|
@@ -18,6 +18,7 @@ import pygeoprocessing
|
|||
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
_OSGEO_LOGGER = logging.getLogger('osgeo')
|
||||
LOG_FMT = (
|
||||
"%(asctime)s "
|
||||
"(%(name)s) "
|
||||
|
@@ -44,6 +45,60 @@ GDAL_ERROR_LEVELS = {
|
|||
DEFAULT_OSR_AXIS_MAPPING_STRATEGY = osr.OAMS_TRADITIONAL_GIS_ORDER
|
||||
|
||||
|
||||
def _log_gdal_errors(*args, **kwargs):
|
||||
"""Log error messages to osgeo.
|
||||
|
||||
All error messages are logged with reasonable ``logging`` levels based
|
||||
on the GDAL error level.
|
||||
|
||||
Note:
|
||||
This function is designed to accept any number of positional and
|
||||
keyword arguments because of some odd forum questions where this
|
||||
function was being called with an unexpected number of arguments.
|
||||
With this catch-all function signature, we can at least guarantee
|
||||
that in the off chance this function is called with the wrong
|
||||
parameters, we still log what happened. See the issue at
|
||||
https://github.com/natcap/invest/issues/630 for details.
|
||||
|
||||
Args:
|
||||
err_level (int): The GDAL error level (e.g. ``gdal.CE_Failure``)
|
||||
err_no (int): The GDAL error number. For a full listing of error
|
||||
codes, see: http://www.gdal.org/cpl__error_8h.html
|
||||
err_msg (string): The error string.
|
||||
|
||||
Returns:
|
||||
``None``
|
||||
"""
|
||||
if len(args) + len(kwargs) != 3:
|
||||
LOGGER.error(
|
||||
'_log_gdal_errors was called with an incorrect number of '
|
||||
f'arguments. args: {args}, kwargs: {kwargs}')
|
||||
|
||||
try:
|
||||
gdal_args = {}
|
||||
for index, key in enumerate(('err_level', 'err_no', 'err_msg')):
|
||||
try:
|
||||
parameter = args[index]
|
||||
except IndexError:
|
||||
parameter = kwargs[key]
|
||||
gdal_args[key] = parameter
|
||||
except KeyError as missing_key:
|
||||
LOGGER.exception(
|
||||
f'_log_gdal_errors called without the argument {missing_key}. '
|
||||
f'Called with args: {args}, kwargs: {kwargs}')
|
||||
|
||||
# Returning from the function because we don't have enough
|
||||
# information to call the ``osgeo_logger`` in the way we intended.
|
||||
return
|
||||
|
||||
err_level = gdal_args['err_level']
|
||||
err_no = gdal_args['err_no']
|
||||
err_msg = gdal_args['err_msg'].replace('\n', '')
|
||||
_OSGEO_LOGGER.log(
|
||||
level=GDAL_ERROR_LEVELS[err_level],
|
||||
msg=f'[errno {err_no}] {err_msg}')
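Hoisting ``_log_gdal_errors`` to module scope makes it importable by the new unit tests further down. A sketch of how GDAL exercises it once installed as the error handler (mirroring the ``PushErrorHandler`` wiring in ``capture_gdal_logging``):

    from osgeo import gdal
    from natcap.invest import utils

    gdal.PushErrorHandler(utils._log_gdal_errors)
    try:
        # raise a synthetic GDAL warning through the bindings
        gdal.Error(gdal.CE_Warning, 999, 'demonstration message')
    finally:
        gdal.PopErrorHandler()
    # the 'osgeo' logger receives: [errno 999] demonstration message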
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def capture_gdal_logging():
|
||||
"""Context manager for logging GDAL errors with python logging.
|
||||
|
@@ -58,28 +113,6 @@ def capture_gdal_logging():
|
|||
Returns:
|
||||
``None``
|
||||
"""
|
||||
osgeo_logger = logging.getLogger('osgeo')
|
||||
|
||||
def _log_gdal_errors(err_level, err_no, err_msg):
|
||||
"""Log error messages to osgeo.
|
||||
|
||||
All error messages are logged with reasonable ``logging`` levels based
|
||||
on the GDAL error level.
|
||||
|
||||
Args:
|
||||
err_level (int): The GDAL error level (e.g. ``gdal.CE_Failure``)
|
||||
err_no (int): The GDAL error number. For a full listing of error
|
||||
codes, see: http://www.gdal.org/cpl__error_8h.html
|
||||
err_msg (string): The error string.
|
||||
|
||||
Returns:
|
||||
``None``
|
||||
"""
|
||||
osgeo_logger.log(
|
||||
level=GDAL_ERROR_LEVELS[err_level],
|
||||
msg='[errno {err}] {msg}'.format(
|
||||
err=err_no, msg=err_msg.replace('\n', ' ')))
|
||||
|
||||
gdal.PushErrorHandler(_log_gdal_errors)
|
||||
try:
|
||||
yield
|
||||
|
@@ -110,10 +143,11 @@ def prepare_workspace(
|
|||
if not os.path.exists(workspace):
|
||||
os.makedirs(workspace)
|
||||
|
||||
modelname = '-'.join(name.replace(':', '').split(' '))
|
||||
logfile = os.path.join(
|
||||
workspace,
|
||||
'InVEST-{modelname}-log-{timestamp}.txt'.format(
|
||||
modelname='-'.join(name.replace(':', '').split(' ')),
|
||||
modelname=modelname,
|
||||
timestamp=datetime.now().strftime("%Y-%m-%d--%H_%M_%S")))
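Pulling ``modelname`` into its own variable lets the new ``except`` block below reuse it. For an illustrative model name, the pieces work out as:

    # Illustrative only; 'SDR: Sediment Delivery Ratio' is a made-up label.
    name = 'SDR: Sediment Delivery Ratio'
    modelname = '-'.join(name.replace(':', '').split(' '))
    # modelname == 'SDR-Sediment-Delivery-Ratio'
    # logfile   == 'InVEST-SDR-Sediment-Delivery-Ratio-log-<timestamp>.txt'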
|
||||
|
||||
with capture_gdal_logging(), log_to_file(logfile,
|
||||
|
@@ -125,10 +159,14 @@ def prepare_workspace(
|
|||
start_time = time.time()
|
||||
try:
|
||||
yield
|
||||
except Exception:
|
||||
LOGGER.exception(f'Exception while executing {modelname}')
|
||||
raise
|
||||
finally:
|
||||
LOGGER.info('Elapsed time: %s',
|
||||
_format_time(round(time.time() - start_time, 2)))
|
||||
logging.captureWarnings(False)
|
||||
LOGGER.info('Execution finished')
|
||||
|
||||
|
||||
class ThreadFilter(logging.Filter):
|
||||
|
|
|
@@ -1425,7 +1425,7 @@ class CoastalVulnerabilityTests(unittest.TestCase):
|
|||
donut_wkt = ('POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), '
|
||||
'(20 30, 35 35, 30 20, 20 30))')
|
||||
geometry = ogr.CreateGeometryFromWkt(donut_wkt)
|
||||
shapely_geom = shapely.wkb.loads(geometry.ExportToWkb())
|
||||
shapely_geom = shapely.wkb.loads(bytes(geometry.ExportToWkb()))
|
||||
line_list = coastal_vulnerability.polygon_to_lines(shapely_geom)
|
||||
# Polygon has 4 sides on exterior, 3 on interior, expect 7 lines
|
||||
self.assertTrue(len(line_list) == 7)
|
||||
|
|
|
@@ -244,7 +244,7 @@ class DelineateItTests(unittest.TestCase):
|
|||
for feature, (expected_geom, expected_fields) in zip(
|
||||
snapped_points_layer, expected_geometries_and_fields):
|
||||
shapely_feature = shapely.wkb.loads(
|
||||
feature.GetGeometryRef().ExportToWkb())
|
||||
bytes(feature.GetGeometryRef().ExportToWkb()))
|
||||
|
||||
self.assertTrue(shapely_feature.equals(expected_geom))
|
||||
self.assertEqual(expected_fields, feature.items())
|
||||
|
@@ -320,7 +320,7 @@ class DelineateItTests(unittest.TestCase):
|
|||
expected_feature = shapely.geometry.Point(5, -5)
|
||||
for feature in snapped_points_layer:
|
||||
shapely_feature = shapely.wkb.loads(
|
||||
feature.GetGeometryRef().ExportToWkb())
|
||||
bytes(feature.GetGeometryRef().ExportToWkb()))
|
||||
self.assertTrue(shapely_feature.equals(expected_feature))
|
||||
finally:
|
||||
snapped_points_layer = None
|
||||
|
|
|
@@ -303,7 +303,7 @@ def _make_criteria_csv(
|
|||
missing_index=False, missing_layer_names=False,
|
||||
missing_criteria_header=False, unknown_criteria=False,
|
||||
wrong_criteria_type=False, wrong_weight=False, large_rating=False,
|
||||
rel_path=False):
|
||||
rel_path=False, extra_metadata=False):
|
||||
"""Make a synthesized information CSV on the designated path.
|
||||
|
||||
Args:
|
||||
|
@@ -339,6 +339,10 @@ def _make_criteria_csv(
|
|||
rel_path (bool): if true, write relative raster and vector paths to
|
||||
the table. File locations are relative to the folder of the table.
|
||||
|
||||
extra_metadata (bool): if true, write extra data at the end of rows.
|
||||
Our sample data template CSV includes this metadata so it's
|
||||
important to test that we can handle it.
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
|
@@ -355,72 +359,74 @@ def _make_criteria_csv(
|
|||
abs_rating_vector_path = os.path.join(workspace_dir, 'hab_1_crit_3.shp')
|
||||
_make_rating_vector(abs_rating_vector_path)
|
||||
|
||||
em = 'Rating Instruction' if extra_metadata else ''
|
||||
|
||||
with open(criteria_table_path, 'w') as table:
|
||||
if missing_index:
|
||||
table.write(
|
||||
'"missing index",habitat_0,,,habitat_1,,,"CRITERIA TYPE",\n')
|
||||
f'"missing index",habitat_0,,,habitat_1,,,"CRITERIA TYPE",{em}\n')
|
||||
elif missing_criteria_header:
|
||||
table.write(
|
||||
'"HABITAT NAME",habitat_0,,,habitat_1,,,"missing type",\n')
|
||||
f'"HABITAT NAME",habitat_0,,,habitat_1,,,"missing type",{em}\n')
|
||||
elif missing_layer_names:
|
||||
table.write(
|
||||
'"HABITAT NAME",habitat,,,habitat_1,,,"CRITERIA TYPE",\n')
|
||||
f'"HABITAT NAME",habitat,,,habitat_1,,,"CRITERIA TYPE",{em}\n')
|
||||
else:
|
||||
table.write(
|
||||
'"HABITAT NAME",habitat_0,,,habitat_1,,,"CRITERIA TYPE",\n')
|
||||
table.write('"HABITAT RESILIENCE ATTRIBUTES",Rating,DQ,Weight,Rating,'
|
||||
'DQ,Weight,E/C\n')
|
||||
f'"HABITAT NAME",habitat_0,,,habitat_1,,,"CRITERIA TYPE",{em}\n')
|
||||
table.write(f'"HABITAT RESILIENCE ATTRIBUTES",Rating,DQ,Weight,Rating,'
|
||||
f'DQ,Weight,E/C,{em}\n')
|
||||
|
||||
# Write relative path to the table
|
||||
if rel_path:
|
||||
rel_rating_raster_path = os.path.relpath(
|
||||
abs_rating_raster_path, workspace_dir)
|
||||
table.write(
|
||||
'"criteria 1",'+rel_rating_raster_path+',2,2,3,2,2,C\n')
|
||||
f'"criteria 1",{rel_rating_raster_path},2,2,3,2,2,C,{em}\n')
|
||||
else:
|
||||
table.write(
|
||||
'"criteria 1",'+abs_rating_raster_path+',2,2,3,2,2,C\n')
|
||||
table.write('"criteria 2",0,2,2,1,2,2,C\n')
|
||||
f'"criteria 1",{abs_rating_raster_path},2,2,3,2,2,C,{em}\n')
|
||||
table.write(f'"criteria 2",0,2,2,1,2,2,C,{em}\n')
|
||||
|
||||
if missing_index:
|
||||
table.write('missing index,,,,,,,\n')
|
||||
table.write(f'missing index,,,,,,,,{em}\n')
|
||||
else:
|
||||
table.write('HABITAT STRESSOR OVERLAP PROPERTIES,,,,,,,\n')
|
||||
table.write(f'HABITAT STRESSOR OVERLAP PROPERTIES,,,,,,,,{em}\n')
|
||||
|
||||
if unknown_criteria:
|
||||
table.write('"extra criteria",1,2,2,0,2,2,E\n')
|
||||
table.write('stressor_0,Rating,DQ,Weight,Rating,DQ,Weight,E/C\n')
|
||||
table.write(f'"extra criteria",1,2,2,0,2,2,E,{em}\n')
|
||||
table.write(f'stressor_0,Rating,DQ,Weight,Rating,DQ,Weight,E/C,{em}\n')
|
||||
|
||||
if rel_path:
|
||||
rel_rating_vector_path = os.path.relpath(
|
||||
abs_rating_vector_path, workspace_dir)
|
||||
table.write(
|
||||
'"criteria 3",2,2,2,'+rel_rating_vector_path+',2,2,C\n')
|
||||
f'"criteria 3",2,2,2,{rel_rating_vector_path},2,2,C,{em}\n')
|
||||
else:
|
||||
table.write(
|
||||
'"criteria 3",2,2,2,'+abs_rating_vector_path+',2,2,C\n')
|
||||
table.write('"criteria 4",1,2,2,0,2,2,E\n')
|
||||
f'"criteria 3",2,2,2,{abs_rating_vector_path},2,2,C,{em}\n')
|
||||
table.write(f'"criteria 4",1,2,2,0,2,2,E,{em}\n')
|
||||
|
||||
if missing_layer_names:
|
||||
table.write('stressor,Rating,DQ,Weight,Rating,DQ,Weight,E/C\n')
|
||||
table.write(f'stressor,Rating,DQ,Weight,Rating,DQ,Weight,E/C,{em}\n')
|
||||
else:
|
||||
table.write('stressor_1,Rating,DQ,Weight,Rating,DQ,Weight,E/C\n')
|
||||
table.write('"criteria 5",3,2,2,3,2,2,C\n')
|
||||
table.write(f'stressor_1,Rating,DQ,Weight,Rating,DQ,Weight,E/C,{em}\n')
|
||||
table.write(f'"criteria 5",3,2,2,3,2,2,C,{em}\n')
|
||||
|
||||
if missing_criteria:
|
||||
# Only write C criteria for stressor_1 to test exception
|
||||
table.write('"criteria 6",3,2,2,3,2,2,C\n')
|
||||
table.write(f'"criteria 6",3,2,2,3,2,2,C,{em}\n')
|
||||
elif wrong_criteria_type:
|
||||
# Produce a wrong criteria type "A"
|
||||
table.write('"criteria 6",3,2,2,3,2,2,A\n')
|
||||
table.write(f'"criteria 6",3,2,2,3,2,2,A,{em}\n')
|
||||
elif wrong_weight:
|
||||
# Produce a wrong weight score
|
||||
table.write('"criteria 6",3,2,nan,3,2,2,E\n')
|
||||
table.write(f'"criteria 6",3,2,nan,3,2,2,E,{em}\n')
|
||||
elif large_rating:
|
||||
# Make a large rating score
|
||||
table.write('"criteria 6",99999,2,2,3,2,2,E\n')
|
||||
table.write(f'"criteria 6",99999,2,2,3,2,2,E,{em}\n')
|
||||
else:
|
||||
table.write('"criteria 6",3,2,2,3,2,2,E\n')
|
||||
table.write(f'"criteria 6",3,2,2,3,2,2,E,{em}\n')
|
||||
|
||||
|
||||
class HraUnitTests(unittest.TestCase):
|
||||
|
@@ -889,7 +895,9 @@ class HraRegressionTests(unittest.TestCase):
|
|||
# Also test relative file paths in Info CSV file
|
||||
_make_info_csv(
|
||||
args['info_table_path'], args['workspace_dir'], rel_path=True)
|
||||
_make_criteria_csv(args['criteria_table_path'], args['workspace_dir'])
|
||||
_make_criteria_csv(
|
||||
args['criteria_table_path'], args['workspace_dir'],
|
||||
extra_metadata=True)
|
||||
_make_aoi_vector(args['aoi_vector_path'])
|
||||
args['n_workers'] = '' # tests empty string for ``n_workers``
|
||||
|
||||
|
|
|
@@ -146,10 +146,10 @@ class NDRTests(unittest.TestCase):
|
|||
|
||||
surf_p_ld = 41.921
|
||||
sub_p_ld = 0
|
||||
p_exp_tot = 7.666
|
||||
p_exp_tot = 5.59887886
|
||||
surf_n_ld = 2978.520
|
||||
sub_n_ld = 28.614
|
||||
n_exp_tot = 339.839
|
||||
n_exp_tot = 304.66061401
|
||||
feature = result_layer.GetFeature(0)
|
||||
if not feature:
|
||||
raise AssertionError("No features were output.")
|
||||
|
@@ -233,10 +233,10 @@ class NDRTests(unittest.TestCase):
|
|||
# results
|
||||
for field, expected_value in [
|
||||
('surf_p_ld', 41.921860),
|
||||
('p_exp_tot', 8.598053),
|
||||
('p_exp_tot', 5.899117),
|
||||
('surf_n_ld', 2978.519775),
|
||||
('sub_n_ld', 28.614094),
|
||||
('n_exp_tot', 339.839386)]:
|
||||
('n_exp_tot', 304.660614)]:
|
||||
val = result_feature.GetField(field)
|
||||
if not numpy.isclose(val, expected_value):
|
||||
mismatch_list.append(
|
||||
|
@@ -353,13 +353,13 @@ class NDRTests(unittest.TestCase):
|
|||
if not feature:
|
||||
raise AssertionError("The fid %s is missing." % fid)
|
||||
for field, value in [
|
||||
('ws_id', fid),
|
||||
('surf_p_ld', surf_p_ld),
|
||||
('sub_p_ld', sub_p_ld),
|
||||
('p_exp_tot', p_exp_tot),
|
||||
('surf_n_ld', surf_n_ld),
|
||||
('sub_n_ld', sub_n_ld),
|
||||
('n_exp_tot', n_exp_tot)]:
|
||||
('ws_id', fid),
|
||||
('surf_p_ld', surf_p_ld),
|
||||
('sub_p_ld', sub_p_ld),
|
||||
('p_exp_tot', p_exp_tot),
|
||||
('surf_n_ld', surf_n_ld),
|
||||
('sub_n_ld', sub_n_ld),
|
||||
('n_exp_tot', n_exp_tot)]:
|
||||
if not numpy.isclose(feature.GetField(field), value):
|
||||
error_results[fid][field] = (
|
||||
feature.GetField(field), value)
|
||||
|
|
|
@@ -213,9 +213,9 @@ class SDRTests(unittest.TestCase):
|
|||
sdr.execute(args)
|
||||
expected_results = {
|
||||
'usle_tot': 14.25030517578,
|
||||
'sed_retent': 443994.1875,
|
||||
'sed_export': 0.87300693989,
|
||||
'sed_dep': 9.32623577118,
|
||||
'sed_retent': 308382.125,
|
||||
'sed_export': 0.60502111912,
|
||||
'sed_dep': 9.05251502991
|
||||
}
|
||||
|
||||
vector_path = os.path.join(
|
||||
|
@@ -261,8 +261,8 @@ class SDRTests(unittest.TestCase):
|
|||
|
||||
sdr.execute(args)
|
||||
expected_results = {
|
||||
'sed_retent': 443994.1875,
|
||||
'sed_export': 0.87300693989,
|
||||
'sed_retent': 308382.125,
|
||||
'sed_export': 0.60502111912,
|
||||
'usle_tot': 14.25030517578,
|
||||
}
|
||||
|
||||
|
@@ -285,8 +285,8 @@ class SDRTests(unittest.TestCase):
|
|||
sdr.execute(args)
|
||||
|
||||
expected_results = {
|
||||
'sed_retent': 376752.75,
|
||||
'sed_export': 0.6872266531,
|
||||
'sed_retent': 369182.09375,
|
||||
'sed_export': 0.67064666748,
|
||||
'usle_tot': 12.6965303421,
|
||||
}
|
||||
|
||||
|
@@ -310,8 +310,8 @@ class SDRTests(unittest.TestCase):
|
|||
sdr.execute(args)
|
||||
|
||||
expected_results = {
|
||||
'sed_retent': 479059.4375,
|
||||
'sed_export': 1.03590250015,
|
||||
'sed_retent': 476649.875,
|
||||
'sed_export': 1.02959537506,
|
||||
'usle_tot': 12.97211265564,
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,840 @@
|
|||
"""InVEST stormwater model tests."""
|
||||
import functools
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
from unittest import mock
|
||||
|
||||
import numpy
|
||||
from osgeo import gdal, osr
|
||||
import pandas
|
||||
import pygeoprocessing
|
||||
from pygeoprocessing.geoprocessing_core import (
|
||||
DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS as opts_tuple)
|
||||
|
||||
|
||||
TEST_DATA = os.path.join(os.path.dirname(
|
||||
__file__), '..', 'data', 'invest-test-data', 'stormwater')
|
||||
|
||||
|
||||
def to_raster(array, path, nodata=-1, pixel_size=(20, -20), origin=(0, 0),
|
||||
epsg=3857, raster_driver_creation_tuple=opts_tuple):
|
||||
"""Wrap around pygeoprocessing.numpy_array_to_raster to set defaults.
|
||||
|
||||
Sets some reasonable defaults for ``numpy_array_to_raster`` and takes care
|
||||
of setting up a WKT spatial reference so that it can be done in one line.
|
||||
|
||||
Args:
|
||||
array (numpy.ndarray): array to be written to ``path`` as a raster
|
||||
path (str): raster path to write ``array`` to
|
||||
nodata (float): nodata value to pass to ``numpy_array_to_raster``
|
||||
pixel_size (tuple(float, float)): pixel size value to pass to
|
||||
``numpy_array_to_raster``
|
||||
origin (tuple(float, float)): origin value to pass to
|
||||
``numpy_array_to_raster``
|
||||
epsg (int): EPSG code used to instantiate a spatial reference that is
|
||||
passed to ``numpy_array_to_raster`` in WKT format
|
||||
raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver
|
||||
name string as the first element and a GDAL creation options
|
||||
tuple/list as the second.
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
srs = osr.SpatialReference()
|
||||
srs.ImportFromEPSG(epsg)
|
||||
projection_wkt = srs.ExportToWkt()
|
||||
pygeoprocessing.numpy_array_to_raster(
|
||||
array,
|
||||
nodata,
|
||||
pixel_size,
|
||||
origin,
|
||||
projection_wkt,
|
||||
path)
|
||||
|
||||
|
||||
def mock_iterblocks(*args, **kwargs):
|
||||
"""Mock function for pygeoprocessing.iterblocks that yields custom blocks.
|
||||
|
||||
Args:
|
||||
xoffs (list[int]): list of x-offsets for each block in order
|
||||
xsizes (list[int]): list of widths for each block in order
|
||||
yoffs (list[int]): list of y-offsets for each block in order
|
||||
ysizes (list[int]): list of heights for each block in order
|
||||
|
||||
For python 3.7 compatibility, these have to be extracted from the
|
||||
kwargs dictionary (can't have keyword-only arguments).
|
||||
|
||||
Yields:
|
||||
dictionary with keys 'xoff', 'yoff', 'win_xsize', 'win_ysize'
|
||||
that have the same meaning as in pygeoprocessing.iterblocks.
|
||||
"""
|
||||
for yoff, ysize in zip(kwargs['yoffs'], kwargs['ysizes']):
|
||||
for xoff, xsize in zip(kwargs['xoffs'], kwargs['xsizes']):
|
||||
yield {
|
||||
'xoff': xoff,
|
||||
'yoff': yoff,
|
||||
'win_xsize': xsize,
|
||||
'win_ysize': ysize}
|
||||
|
||||
|
||||
class StormwaterTests(unittest.TestCase):
|
||||
"""Tests for InVEST stormwater model."""
|
||||
|
||||
def setUp(self):
|
||||
"""Create a temp directory for the workspace."""
|
||||
self.workspace_dir = tempfile.mkdtemp()
|
||||
|
||||
def tearDown(self):
|
||||
"""Override tearDown function to remove temporary directory."""
|
||||
shutil.rmtree(self.workspace_dir)
|
||||
|
||||
@staticmethod
|
||||
def basic_setup(workspace_dir, ir=False):
|
||||
"""
|
||||
Set up for the full model run tests.
|
||||
|
||||
Args:
|
||||
workspace_dir (str): path to a directory in which to create files
|
||||
ir (bool): if True, include IR data in the biophysical table
|
||||
|
||||
Returns:
|
||||
List of the data and files that were created, in this order:
|
||||
|
||||
0 (numpy.ndarray): array written to the biophysical table path
|
||||
1 (str): path to the biophysical table csv
|
||||
2 (numpy.ndarray): array of LULC values written to the LULC path
|
||||
3 (str): path to the LULC raster
|
||||
4 (numpy.ndarray): array of soil group values written to the soil
|
||||
group raster path
|
||||
5 (str): path to the soil group raster
|
||||
6 (numpy.ndarray): array of precipitation values written to the
|
||||
precipitation raster path
|
||||
7 (str): path to the precipitation raster
|
||||
8 (float): stormwater retention cost value per cubic meter
|
||||
9 (float): pixel area for all the rasters created
|
||||
"""
|
||||
# In practice RC_X + IR_X <= 1, but they are independent in the model,
|
||||
# so ignoring that constraint for convenience.
|
||||
biophysical_dict = {
|
||||
'lucode': [0, 1, 11, 12],
|
||||
'EMC_pollutant1': [2.55, 0, 1, 5],
|
||||
'RC_A': [0, 0.15, 0.1, 1],
|
||||
'RC_B': [0, 0.25, 0.2, 1],
|
||||
'RC_C': [0, 0.35, 0.3, 1],
|
||||
'RC_D': [0, 0.45, 0.4, 1],
|
||||
'is_connected': [0, 0, 0, 1]
|
||||
}
|
||||
if ir:
|
||||
biophysical_dict.update({
|
||||
'IR_A': [0, 0.55, 0.5, 1],
|
||||
'IR_B': [0, 0.65, 0.6, 1],
|
||||
'IR_C': [0, 0.75, 0.7, 1],
|
||||
'IR_D': [0, 0.85, 0.8, 1]
|
||||
})
|
||||
|
||||
biophysical_table = pandas.DataFrame(
|
||||
biophysical_dict).set_index(['lucode'])
|
||||
retention_cost = 2.53
|
||||
|
||||
lulc_array = numpy.array([
|
||||
[0, 0, 0, 0],
|
||||
[1, 1, 1, 1],
|
||||
[11, 11, 11, 11],
|
||||
[12, 12, 12, 12]], dtype=numpy.uint8)
|
||||
soil_group_array = numpy.array([
|
||||
[1, 2, 3, 4],
|
||||
[1, 2, 3, 4],
|
||||
[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]], dtype=numpy.uint8)
|
||||
precipitation_array = numpy.array([
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[12.5, 12.5, 12.5, 12.5],
|
||||
[12.5, 12.5, 12.5, 12.5]], dtype=numpy.float32)
|
||||
lulc_path = os.path.join(workspace_dir, 'lulc.tif')
|
||||
soil_group_path = os.path.join(workspace_dir, 'soil_group.tif')
|
||||
precipitation_path = os.path.join(workspace_dir, 'precipitation.tif')
|
||||
biophysical_table_path = os.path.join(workspace_dir, 'biophysical.csv')
|
||||
|
||||
pixel_size = (20, -20)
|
||||
pixel_area = abs(pixel_size[0] * pixel_size[1])
|
||||
# save each dataset to a file
|
||||
for (array, path) in [
|
||||
(lulc_array, lulc_path),
|
||||
(soil_group_array, soil_group_path),
|
||||
(precipitation_array, precipitation_path)]:
|
||||
to_raster(array, path, pixel_size=pixel_size)
|
||||
biophysical_table.to_csv(biophysical_table_path)
|
||||
|
||||
return [
|
||||
biophysical_table,
|
||||
biophysical_table_path,
|
||||
lulc_array,
|
||||
lulc_path,
|
||||
soil_group_array,
|
||||
soil_group_path,
|
||||
precipitation_array,
|
||||
precipitation_path,
|
||||
retention_cost,
|
||||
pixel_area
|
||||
]
|
||||
|
||||
def test_basic(self):
|
||||
"""Stormwater: basic model run."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
(biophysical_table,
|
||||
biophysical_table_path,
|
||||
lulc_array,
|
||||
lulc_path,
|
||||
soil_group_array,
|
||||
soil_group_path,
|
||||
precipitation_array,
|
||||
precipitation_path,
|
||||
retention_cost,
|
||||
pixel_area) = self.basic_setup(self.workspace_dir)
|
||||
|
||||
args = {
|
||||
'workspace_dir': self.workspace_dir,
|
||||
'results_suffix': 'suffix',
|
||||
'lulc_path': lulc_path,
|
||||
'soil_group_path': soil_group_path,
|
||||
'precipitation_path': precipitation_path,
|
||||
'biophysical_table': biophysical_table_path,
|
||||
'adjust_retention_ratios': False,
|
||||
'retention_radius': None,
|
||||
'road_centerlines_path': None,
|
||||
'aggregate_areas_path': None,
|
||||
'replacement_cost': retention_cost
|
||||
}
|
||||
|
||||
soil_group_codes = {1: 'A', 2: 'B', 3: 'C', 4: 'D'}
|
||||
|
||||
stormwater.execute(args)
|
||||
|
||||
retention_volume_path = os.path.join(
|
||||
self.workspace_dir, 'retention_volume_suffix.tif')
|
||||
infiltration_volume_path = os.path.join(
|
||||
self.workspace_dir, 'infiltration_volume_suffix.tif')
|
||||
pollutant_path = os.path.join(
|
||||
self.workspace_dir, 'avoided_pollutant_load_pollutant1_suffix.tif')
|
||||
value_path = os.path.join(
|
||||
self.workspace_dir, 'retention_value_suffix.tif')
|
||||
# there should be no infiltration output because there's no
|
||||
# infiltration data in the biophysical table
|
||||
self.assertFalse(os.path.exists(infiltration_volume_path))
|
||||
|
||||
retention_raster = gdal.OpenEx(retention_volume_path, gdal.OF_RASTER)
|
||||
retention_volume = retention_raster.GetRasterBand(1).ReadAsArray()
|
||||
|
||||
avoided_pollutant_raster = gdal.OpenEx(pollutant_path, gdal.OF_RASTER)
|
||||
avoided_pollutant_load = avoided_pollutant_raster.GetRasterBand(
|
||||
1).ReadAsArray()
|
||||
|
||||
retention_value_raster = gdal.OpenEx(value_path, gdal.OF_RASTER)
|
||||
retention_value = retention_value_raster.GetRasterBand(1).ReadAsArray()
|
||||
|
||||
for row in range(retention_volume.shape[0]):
|
||||
for col in range(retention_volume.shape[1]):
|
||||
|
||||
soil_group = soil_group_array[row, col]
|
||||
lulc = lulc_array[row, col]
|
||||
precipitation = precipitation_array[row, col]
|
||||
|
||||
rc_value = biophysical_table[
|
||||
f'RC_{soil_group_codes[soil_group]}'][lulc]
|
||||
|
||||
# precipitation (mm/yr) * 0.001 (m/mm) * pixel area (m^2) =
|
||||
# m^3/yr
|
||||
actual_retention_volume = retention_volume[row, col]
|
||||
expected_retention_volume = (1 - rc_value) * \
|
||||
precipitation * 0.001 * pixel_area
|
||||
numpy.testing.assert_allclose(
|
||||
actual_retention_volume,
|
||||
expected_retention_volume, rtol=1e-6)
|
||||
|
||||
# retention (m^3/yr) * cost ($/m^3) = value ($/yr)
|
||||
actual_value = retention_value[row, col]
|
||||
expected_value = expected_retention_volume * retention_cost
|
||||
numpy.testing.assert_allclose(
|
||||
actual_value, expected_value, rtol=1e-6)
|
||||
|
||||
for row in range(avoided_pollutant_load.shape[0]):
|
||||
for col in range(avoided_pollutant_load.shape[1]):
|
||||
|
||||
lulc = lulc_array[row, col]
|
||||
retention = retention_volume[row, col]
|
||||
emc = biophysical_table['EMC_pollutant1'][lulc]
|
||||
|
||||
# retention (m^3/yr) * emc (mg/L) * 1000 (L/m^3) * 0.000001
|
||||
# (kg/mg) = kg/yr
|
||||
avoided_load = avoided_pollutant_load[row, col]
|
||||
expected_avoided_load = retention * emc * 0.001
|
||||
numpy.testing.assert_allclose(
|
||||
avoided_load, expected_avoided_load, rtol=1e-6)
|
||||
|
||||
def test_ir(self):
|
||||
"""Stormwater: full model run with IR data in biophysical table."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
(biophysical_table,
|
||||
biophysical_table_path,
|
||||
lulc_array,
|
||||
lulc_path,
|
||||
soil_group_array,
|
||||
soil_group_path,
|
||||
precipitation_array,
|
||||
precipitation_path,
|
||||
retention_cost,
|
||||
pixel_area) = self.basic_setup(self.workspace_dir, ir=True)
|
||||
|
||||
args = {
|
||||
'workspace_dir': self.workspace_dir,
|
||||
'lulc_path': lulc_path,
|
||||
'soil_group_path': soil_group_path,
|
||||
'precipitation_path': precipitation_path,
|
||||
'biophysical_table': biophysical_table_path,
|
||||
'adjust_retention_ratios': False,
|
||||
'retention_radius': None,
|
||||
'road_centerlines_path': None,
|
||||
'aggregate_areas_path': None,
|
||||
'replacement_cost': retention_cost
|
||||
}
|
||||
|
||||
soil_group_codes = {1: 'A', 2: 'B', 3: 'C', 4: 'D'}
|
||||
|
||||
stormwater.execute(args)
|
||||
|
||||
retention_volume_path = os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['retention_volume_path'])
|
||||
infiltration_volume_path = os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['infiltration_volume_path'])
|
||||
value_path = os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['retention_value_path'])
|
||||
|
||||
retention_raster = gdal.OpenEx(retention_volume_path, gdal.OF_RASTER)
|
||||
retention_volume = retention_raster.GetRasterBand(1).ReadAsArray()
|
||||
|
||||
infiltration_raster = gdal.OpenEx(
|
||||
infiltration_volume_path, gdal.OF_RASTER)
|
||||
infiltration_volume = infiltration_raster.GetRasterBand(
|
||||
1).ReadAsArray()
|
||||
|
||||
retention_value_raster = gdal.OpenEx(value_path, gdal.OF_RASTER)
|
||||
retention_value = retention_value_raster.GetRasterBand(1).ReadAsArray()
|
||||
|
||||
for row in range(retention_volume.shape[0]):
|
||||
for col in range(retention_volume.shape[1]):
|
||||
|
||||
soil_group = soil_group_array[row, col]
|
||||
lulc = lulc_array[row, col]
|
||||
precipitation = precipitation_array[row, col]
|
||||
|
||||
rc_value = biophysical_table[
|
||||
f'RC_{soil_group_codes[soil_group]}'][lulc]
|
||||
|
||||
# precipitation (mm/yr) * 0.001 (m/mm) * pixel area (m^2) =
|
||||
# m^3/yr
|
||||
actual_volume = retention_volume[row, col]
|
||||
expected_volume = (1 - rc_value) * \
|
||||
precipitation * 0.001 * pixel_area
|
||||
numpy.testing.assert_allclose(actual_volume, expected_volume,
|
||||
rtol=1e-6)
|
||||
|
||||
# retention (m^3/yr) * cost ($/m^3) = value ($/yr)
|
||||
actual_value = retention_value[row, col]
|
||||
expected_value = expected_volume * retention_cost
|
||||
numpy.testing.assert_allclose(actual_value, expected_value,
|
||||
rtol=1e-6)
|
||||
|
||||
for row in range(infiltration_volume.shape[0]):
|
||||
for col in range(infiltration_volume.shape[1]):
|
||||
|
||||
soil_group = soil_group_array[row][col]
|
||||
lulc = lulc_array[row][col]
|
||||
precipitation = precipitation_array[row][col]
|
||||
|
||||
ir_value = biophysical_table[
|
||||
f'IR_{soil_group_codes[soil_group]}'][lulc]
|
||||
|
||||
# precipitation (mm/yr) * 0.001 (m/mm) * pixel area (m^2) = m^3
|
||||
expected_volume = (ir_value) * \
|
||||
precipitation * 0.001 * pixel_area
|
||||
numpy.testing.assert_allclose(infiltration_volume[row][col],
|
||||
expected_volume, rtol=1e-6)
|
||||
|
||||
def test_adjust(self):
|
||||
"""Stormwater: full model run with adjust retention ratios."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
(biophysical_table,
|
||||
biophysical_table_path,
|
||||
lulc_array,
|
||||
lulc_path,
|
||||
soil_group_array,
|
||||
soil_group_path,
|
||||
precipitation_array,
|
||||
precipitation_path,
|
||||
retention_cost,
|
||||
pixel_area) = self.basic_setup(self.workspace_dir)
|
||||
|
||||
args = {
|
||||
'workspace_dir': self.workspace_dir,
|
||||
'lulc_path': lulc_path,
|
||||
'soil_group_path': soil_group_path,
|
||||
'precipitation_path': precipitation_path,
|
||||
'biophysical_table': biophysical_table_path,
|
||||
'adjust_retention_ratios': True,
|
||||
'retention_radius': 30,
|
||||
'road_centerlines_path': os.path.join(
|
||||
TEST_DATA, 'centerlines.gpkg'),
|
||||
'aggregate_areas_path': None,
|
||||
'replacement_cost': retention_cost
|
||||
}
|
||||
stormwater.execute(args)
|
||||
|
||||
adjusted_ratio_raster = gdal.OpenEx(
|
||||
os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['adjusted_retention_ratio_path']),
|
||||
gdal.OF_RASTER)
|
||||
retention_volume_raster = gdal.OpenEx(
|
||||
os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['retention_volume_path']),
|
||||
gdal.OF_RASTER)
|
||||
runoff_volume_raster = gdal.OpenEx(
|
||||
os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['runoff_volume_path']),
|
||||
gdal.OF_RASTER)
|
||||
actual_runoff_volume = runoff_volume_raster.GetRasterBand(
|
||||
1).ReadAsArray()
|
||||
actual_adjusted_ratios = adjusted_ratio_raster.GetRasterBand(
|
||||
1).ReadAsArray()
|
||||
actual_retention_volume = retention_volume_raster.GetRasterBand(
|
||||
1).ReadAsArray()
|
||||
|
||||
expected_adjusted_ratios = numpy.array([
|
||||
[1, 1, 1, 1],
|
||||
[0.9825, 0.9625, 0.924167, 0.8875],
|
||||
[0.9, 0.8, 0.7, 0.6],
|
||||
[0, 0, 0, 0]], dtype=numpy.float32)
|
||||
numpy.testing.assert_allclose(actual_adjusted_ratios,
|
||||
expected_adjusted_ratios, rtol=1e-6)
|
||||
expected_retention_volume = (expected_adjusted_ratios *
|
||||
precipitation_array * pixel_area * 0.001)
|
||||
numpy.testing.assert_allclose(actual_retention_volume,
|
||||
expected_retention_volume, rtol=1e-6)
|
||||
expected_runoff_volume = ((1 - expected_adjusted_ratios) *
|
||||
precipitation_array * pixel_area * 0.001)
|
||||
numpy.testing.assert_allclose(actual_runoff_volume,
|
||||
expected_runoff_volume, rtol=1e-6)
|
||||
|
||||
def test_aggregate(self):
|
||||
"""Stormwater: full model run with aggregate results."""
|
||||
from natcap.invest import stormwater
|
||||
(biophysical_table,
|
||||
biophysical_table_path,
|
||||
lulc_array,
|
||||
lulc_path,
|
||||
soil_group_array,
|
||||
soil_group_path,
|
||||
precipitation_array,
|
||||
precipitation_path,
|
||||
retention_cost,
|
||||
pixel_area) = self.basic_setup(self.workspace_dir)
|
||||
|
||||
args = {
|
||||
'workspace_dir': self.workspace_dir,
|
||||
'lulc_path': lulc_path,
|
||||
'soil_group_path': soil_group_path,
|
||||
'precipitation_path': precipitation_path,
|
||||
'biophysical_table': biophysical_table_path,
|
||||
'adjust_retention_ratios': False,
|
||||
'retention_radius': None,
|
||||
'road_centerlines_path': None,
|
||||
'aggregate_areas_path': os.path.join(TEST_DATA, 'aoi.gpkg'),
|
||||
'replacement_cost': retention_cost
|
||||
}
|
||||
stormwater.execute(args)
|
||||
|
||||
expected_feature_fields = {
|
||||
1: {
|
||||
'RR_mean': 0.825,
|
||||
'RV_sum': 8.5,
|
||||
'avoided_pollutant1': .0085,
|
||||
'load_pollutant1': .000375,
|
||||
'val_sum': 21.505
|
||||
},
|
||||
2: {
|
||||
'RR_mean': 0.5375,
|
||||
'RV_sum': 7.5,
|
||||
'avoided_pollutant1': .0075,
|
||||
'load_pollutant1': .006875,
|
||||
'val_sum': 18.975
|
||||
},
|
||||
3: {
|
||||
'RR_mean': 0,
|
||||
'RV_sum': 0,
|
||||
'avoided_pollutant1': 0,
|
||||
'load_pollutant1': 0,
|
||||
'val_sum': 0
|
||||
}
|
||||
}
|
||||
|
||||
aggregate_data_path = os.path.join(
|
||||
self.workspace_dir,
|
||||
stormwater.FINAL_OUTPUTS['reprojected_aggregate_areas_path'])
|
||||
aggregate_vector = gdal.OpenEx(aggregate_data_path, gdal.OF_VECTOR)
|
||||
aggregate_layer = aggregate_vector.GetLayer()
|
||||
for feature in aggregate_layer:
|
||||
feature_id = feature.GetFID()
|
||||
for key, val in expected_feature_fields[feature_id].items():
|
||||
field_value = feature.GetField(key)
|
||||
numpy.testing.assert_allclose(field_value, val, rtol=1e-6)
|
||||
|
||||
def test_lookup_ratios(self):
|
||||
"""Stormwater: test lookup_ratios function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
sorted_lucodes = [10, 11, 12, 13]
|
||||
lulc_array = numpy.array([
|
||||
[13, 12],
|
||||
[11, 10]], dtype=numpy.uint8)
|
||||
soil_group_array = numpy.array([
|
||||
[4, 4],
|
||||
[2, 2]], dtype=numpy.uint8)
|
||||
lulc_path = os.path.join(self.workspace_dir, 'lulc.tif')
|
||||
soil_group_path = os.path.join(self.workspace_dir, 'soil_groups.tif')
|
||||
output_path = os.path.join(self.workspace_dir, 'out.tif')
|
||||
to_raster(lulc_array, lulc_path, nodata=255)
|
||||
to_raster(soil_group_array, soil_group_path, nodata=255)
|
||||
# rows correspond to sorted lucodes, columns to soil groups A-D
|
||||
ratio_array = numpy.array([
|
||||
[0.11, 0.12, 0.13, 0.14],
|
||||
[0.21, 0.22, 0.23, 0.24],
|
||||
[0.31, 0.32, 0.33, 0.34],
|
||||
[0.41, 0.42, 0.43, 0.44]], dtype=numpy.float32)
|
||||
expected_output = numpy.array([
|
||||
[0.44, 0.34],
|
||||
[0.22, 0.12]], dtype=numpy.float32)
|
||||
stormwater.lookup_ratios(
|
||||
lulc_path,
|
||||
soil_group_path,
|
||||
ratio_array,
|
||||
sorted_lucodes,
|
||||
output_path)
|
||||
actual_output = pygeoprocessing.raster_to_numpy_array(output_path)
|
||||
numpy.testing.assert_allclose(expected_output, actual_output)
|
||||
|
||||
def test_volume_op(self):
|
||||
"""Stormwater: test volume_op function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
precip_nodata = -2.5
|
||||
ratio_array = numpy.array([
|
||||
[0, 0.0001, stormwater.FLOAT_NODATA],
|
||||
[0.5, 0.9, 1]], dtype=numpy.float32)
|
||||
precip_array = numpy.array([
|
||||
[10.5, 0, 1],
|
||||
[0.5, 0, precip_nodata]], dtype=numpy.float32)
|
||||
pixel_area = 400
|
||||
|
||||
out = stormwater.volume_op(
|
||||
ratio_array,
|
||||
precip_array,
|
||||
precip_nodata,
|
||||
pixel_area)
|
||||
# precip (mm/yr) * area (m^2) * 0.001 (m/mm) * ratio = volume (m^3/yr)
|
||||
for y in range(ratio_array.shape[0]):
|
||||
for x in range(ratio_array.shape[1]):
|
||||
if (ratio_array[y, x] == stormwater.FLOAT_NODATA or
|
||||
precip_array[y, x] == precip_nodata):
|
||||
numpy.testing.assert_allclose(
|
||||
out[y, x],
|
||||
stormwater.FLOAT_NODATA)
|
||||
else:
|
||||
numpy.testing.assert_allclose(
|
||||
out[y, x],
|
||||
precip_array[y, x]*ratio_array[y, x]*pixel_area/1000)
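A worked instance of the unit conversion in the comment above:

    # 10.5 mm/yr of precipitation on a 400 m^2 pixel with a 0.5 ratio:
    volume = 10.5 * 0.001 * 400 * 0.5   # 2.1 m^3/yr retained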
|
||||
|
||||
def test_pollutant_load_op(self):
|
||||
"""Stormwater: test pollutant_load_op function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
# test with nodata values greater and less than the LULC codes
|
||||
# there was a bug that only happened with a larger nodata value
|
||||
for lulc_nodata in [-1, 127]:
|
||||
with self.subTest(lulc_nodata=lulc_nodata):
|
||||
lulc_array = numpy.array([
|
||||
[0, 0, 0],
|
||||
[1, 1, 1],
|
||||
[2, 2, lulc_nodata]], dtype=numpy.int8)
|
||||
retention_volume_array = numpy.array([
|
||||
[0, 1.5, stormwater.FLOAT_NODATA],
|
||||
[0, 1.5, 100],
|
||||
[0, 1.5, 100]], dtype=numpy.float32)
|
||||
sorted_lucodes = numpy.array([0, 1, 2], dtype=numpy.uint8)
|
||||
emc_array = numpy.array([0, 0.5, 3], dtype=numpy.float32)
|
||||
|
||||
out = stormwater.pollutant_load_op(
|
||||
lulc_array,
|
||||
lulc_nodata,
|
||||
retention_volume_array,
|
||||
sorted_lucodes,
|
||||
emc_array)
|
||||
for y in range(lulc_array.shape[0]):
|
||||
for x in range(lulc_array.shape[1]):
|
||||
if (lulc_array[y, x] == lulc_nodata or
|
||||
retention_volume_array[y, x] == stormwater.FLOAT_NODATA):
|
||||
numpy.testing.assert_allclose(
|
||||
out[y, x], stormwater.FLOAT_NODATA)
|
||||
else:
|
||||
emc_value = emc_array[lulc_array[y, x]]
|
||||
expected = emc_value * \
|
||||
retention_volume_array[y, x] / 1000
|
||||
numpy.testing.assert_allclose(out[y, x], expected)
|
||||
|
||||
def test_retention_value_op(self):
|
||||
"""Stormwater: test retention_value_op function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
retention_volume_array = numpy.array([
|
||||
[0, 1.5, stormwater.FLOAT_NODATA],
|
||||
[0, 1.5, 100]], dtype=numpy.float32)
|
||||
replacement_cost = 1.5
|
||||
expected = numpy.array([
|
||||
[0, 2.25, stormwater.FLOAT_NODATA],
|
||||
[0, 2.25, 150]], dtype=numpy.float32)
|
||||
actual = stormwater.retention_value_op(
|
||||
retention_volume_array,
|
||||
replacement_cost)
|
||||
numpy.testing.assert_allclose(actual, expected)
|
||||
|
||||
def test_adjust_op(self):
|
||||
"""Stormwater: test adjust_op function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
ratio_array = numpy.array([
|
||||
[0, 0.0001, stormwater.FLOAT_NODATA],
|
||||
[0.5, 0.9, 1]], dtype=numpy.float32)
|
||||
# these are obviously not averages from the above array but
|
||||
# it doesn't matter for this test
|
||||
avg_ratio_array = numpy.array([
|
||||
[0.5, 0.5, 0.5],
|
||||
[0.5, stormwater.FLOAT_NODATA, 0.5]], dtype=numpy.float32)
|
||||
near_connected_lulc_array = numpy.array([
|
||||
[0, 0, 1],
|
||||
[stormwater.UINT8_NODATA, 0, 1]], dtype=numpy.uint8)
|
||||
near_road_centerline_array = numpy.array([
|
||||
[1, 1, 1],
|
||||
[0, 0, 0]], dtype=numpy.uint8)
|
||||
|
||||
out = stormwater.adjust_op(
|
||||
ratio_array,
|
||||
avg_ratio_array,
|
||||
near_connected_lulc_array,
|
||||
near_road_centerline_array)
|
||||
for y in range(ratio_array.shape[0]):
|
||||
for x in range(ratio_array.shape[1]):
|
||||
if (ratio_array[y, x] == stormwater.FLOAT_NODATA or
|
||||
avg_ratio_array[y, x] == stormwater.FLOAT_NODATA or
|
||||
near_connected_lulc_array[y, x] == stormwater.UINT8_NODATA or
|
||||
near_road_centerline_array[y, x] == stormwater.UINT8_NODATA):
|
||||
numpy.testing.assert_allclose(
|
||||
out[y, x], stormwater.FLOAT_NODATA)
|
||||
else:
|
||||
# equation 2-4: Radj_ij = R_ij + (1 - R_ij) * C_ij
|
||||
adjust_factor = (
|
||||
0 if (
|
||||
near_connected_lulc_array[y, x] or
|
||||
near_road_centerline_array[y, x]
|
||||
) else avg_ratio_array[y, x])
|
||||
adjusted = (ratio_array[y, x] +
|
||||
(1 - ratio_array[y, x]) * adjust_factor)
|
||||
numpy.testing.assert_allclose(out[y, x], adjusted,
|
||||
rtol=1e-6)
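Equation 2-4 pulls a pixel's retention ratio toward 1 by its neighborhood average, except near connected LULC or road centerlines, where the factor collapses to 0 and the ratio is left alone. A worked instance with made-up values:

    # Radj_ij = R_ij + (1 - R_ij) * C_ij
    R_ij = 0.5
    C_ij = 0.5                       # neighborhood average retention ratio
    Radj = R_ij + (1 - R_ij) * C_ij  # 0.75
    # near a road or connected LULC, C_ij = 0 and Radj == R_ij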
|
||||
|
||||
def test_is_near(self):
|
||||
"""Stormwater: test is_near function."""
|
||||
from natcap.invest import stormwater
|
||||
is_connected_array = numpy.array([
|
||||
[0, 0, 1, 0, 0, 0],
|
||||
[1, 0, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 1]
|
||||
], dtype=numpy.uint8)
|
||||
radius = 1 # 1 pixel
|
||||
# search kernel:
|
||||
# [0, 1, 0],
|
||||
# [1, 1, 1],
|
||||
# [0, 1, 0]
|
||||
# convolution sum array:
|
||||
# [1, 1, 2, 1, 0, 0],
|
||||
# [1, 2, 2, 1, 0, 1],
|
||||
# [1, 0, 1, 0, 1, 1]
|
||||
# expected is_near array: sum > 0
|
||||
expected = numpy.array([
|
||||
[1, 1, 1, 1, 0, 0],
|
||||
[1, 1, 1, 1, 0, 1],
|
||||
[1, 0, 1, 0, 1, 1]
|
||||
], dtype=numpy.uint8)
|
||||
|
||||
connected_path = os.path.join(self.workspace_dir, 'connected.tif')
|
||||
distance_path = os.path.join(self.workspace_dir, 'distance.tif')
|
||||
out_path = os.path.join(self.workspace_dir, 'near_connected.tif')
|
||||
to_raster(is_connected_array, connected_path, pixel_size=(10, -10))
|
||||
|
||||
mocked = functools.partial(mock_iterblocks, yoffs=[0], ysizes=[3],
|
||||
xoffs=[0, 3], xsizes=[3, 3])
|
||||
with mock.patch('natcap.invest.stormwater.pygeoprocessing.iterblocks',
|
||||
mocked):
|
||||
stormwater.is_near(connected_path, radius, distance_path, out_path)
|
||||
actual = pygeoprocessing.raster_to_numpy_array(out_path)
|
||||
numpy.testing.assert_equal(expected, actual)
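The kernel and threshold described in the comments can be reproduced independently of ``is_near`` with a plain 2-D convolution (scipy is assumed here purely for illustration; the model itself uses pygeoprocessing):

    import numpy
    from scipy import signal

    connected = numpy.array([
        [0, 0, 1, 0, 0, 0],
        [1, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 1]], dtype=numpy.uint8)
    kernel = numpy.array([
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0]], dtype=numpy.uint8)
    # zero-padded convolution, then threshold at zero
    sums = signal.convolve2d(connected, kernel, mode='same')
    near = (sums > 0).astype(numpy.uint8)
    # near now matches the expected array in the test above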
|
||||
|
||||
def test_make_search_kernel(self):
|
||||
"""Stormwater: test make_search_kernel function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
array = numpy.zeros((10, 10))
|
||||
path = os.path.join(self.workspace_dir, 'make_search_kernel.tif')
|
||||
to_raster(array, path, pixel_size=(10, -10))
|
||||
|
||||
expected_5 = numpy.array([[1]], dtype=numpy.uint8)
|
||||
actual_5 = stormwater.make_search_kernel(path, 5)
|
||||
numpy.testing.assert_equal(expected_5, actual_5)
|
||||
|
||||
expected_9 = numpy.array([[1]], dtype=numpy.uint8)
|
||||
actual_9 = stormwater.make_search_kernel(path, 9)
|
||||
numpy.testing.assert_equal(expected_9, actual_9)
|
||||
|
||||
expected_10 = numpy.array([
|
||||
[0, 1, 0],
|
||||
[1, 1, 1],
|
||||
[0, 1, 0]], dtype=numpy.uint8)
|
||||
actual_10 = stormwater.make_search_kernel(path, 10)
|
||||
numpy.testing.assert_equal(expected_10, actual_10)
|
||||
|
||||
expected_15 = numpy.array([
|
||||
[1, 1, 1],
|
||||
[1, 1, 1],
|
||||
[1, 1, 1]], dtype=numpy.uint8)
|
||||
actual_15 = stormwater.make_search_kernel(path, 15)
|
||||
numpy.testing.assert_equal(expected_15, actual_15)
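The expected kernels follow from the 10 m pixel size: an orthogonal neighbor's center sits 10 m away and a diagonal one about 14.14 m away, so the kernel grows from a single cell (radius 5 or 9) to a plus shape (radius 10) to a full 3x3 (radius 15). A quick check of those distances:

    import math
    orthogonal = 10.0                  # one pixel over at 10 m resolution
    diagonal = math.hypot(10.0, 10.0)  # ~14.142 m
    assert orthogonal <= 10.0 < diagonal   # radius 10: plus-shaped kernel
    assert diagonal <= 15.0                # radius 15: full 3x3 kernel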
|
||||
|
||||
def test_raster_average(self):
|
||||
"""Stormwater: test raster_average function."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
array = numpy.empty((150, 150))
|
||||
nodata = -1
|
||||
array[:, 0:128] = 10
|
||||
array[:, 128:149] = 20
|
||||
array[:, 149] = nodata
|
||||
|
||||
data_path = os.path.join(self.workspace_dir, 'data.tif')
|
||||
kernel_path = os.path.join(self.workspace_dir, 'kernel.tif')
|
||||
average_path = os.path.join(self.workspace_dir, 'average.tif')
|
||||
to_raster(array, data_path, pixel_size=(10, -10))
|
||||
stormwater.raster_average(data_path, 11, kernel_path, average_path)
|
||||
|
||||
expected_kernel = numpy.array([
|
||||
[0, 1, 0],
|
||||
[1, 1, 1],
|
||||
[0, 1, 0]], dtype=numpy.uint8)
|
||||
actual_kernel = pygeoprocessing.raster_to_numpy_array(kernel_path)
|
||||
numpy.testing.assert_equal(actual_kernel, expected_kernel)
|
||||
|
||||
actual_average = pygeoprocessing.raster_to_numpy_array(average_path)
|
||||
expected_average = numpy.empty((150, 150))
|
||||
expected_average[:, 0:127] = 10
|
||||
expected_average[:, 127] = 12
|
||||
expected_average[0, 127] = 12.5
|
||||
expected_average[-1, 127] = 12.5
|
||||
expected_average[:, 128] = 18
|
||||
expected_average[0, 128] = 17.5
|
||||
expected_average[-1, 128] = 17.5
|
||||
expected_average[:, 129:149] = 20
|
||||
expected_average[:, 149] = -1
|
||||
numpy.testing.assert_allclose(actual_average, expected_average)
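The boundary columns of ``expected_average`` come straight from the plus-shaped kernel; a worked check of the four distinct values:

    interior_127 = (4 * 10 + 1 * 20) / 5  # left/up/down/self = 10, right = 20 -> 12.0
    edge_127 = (3 * 10 + 1 * 20) / 4      # top row, no 'up' neighbor -> 12.5
    interior_128 = (1 * 10 + 4 * 20) / 5  # only the left neighbor is 10 -> 18.0
    edge_128 = (1 * 10 + 3 * 20) / 4      # top row -> 17.5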
|
||||
|
||||
def test_validate(self):
|
||||
"""Stormwater: test arg validation."""
|
||||
from natcap.invest import stormwater
|
||||
|
||||
# test args missing necessary values for adjust ratios
|
||||
args = {
|
||||
'workspace_dir': self.workspace_dir,
|
||||
'lulc_path': 'x',
|
||||
'soil_group_path': 'x',
|
||||
'precipitation_path': 'x',
|
||||
'biophysical_table': 'x',
|
||||
'adjust_retention_ratios': True,
|
||||
'retention_radius': None,
|
||||
'road_centerlines_path': None,
|
||||
'aggregate_areas_path': None,
|
||||
'replacement_cost': None
|
||||
}
|
||||
messages = stormwater.validate(args)
|
||||
for arg_list, message in messages:
|
||||
if arg_list[0] in ['retention_radius', 'road_centerlines_path']:
|
||||
self.assertEqual(message, 'Key is required but has no value')

    def test_lulc_signed_byte(self):
        """Stormwater: regression test for handling signed byte LULC input."""
        from natcap.invest import stormwater

        (_, biophysical_table_path, _, _, _, soil_group_path, _,
         precipitation_path, retention_cost,
         pixel_size) = self.basic_setup(self.workspace_dir)

        # make a custom LULC raster with a signed byte type
        lulc_array = numpy.array([
            [0, 0, 0, 0],
            [1, 1, 1, 1],
            [11, 11, 11, 11],
            [12, 12, 12, 12]], dtype=numpy.int8)
        lulc_path = os.path.join(self.workspace_dir, 'lulc.tif')
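        # opts_tuple is assumed here to be the module-level
        # (driver name, creation options) pair used by to_raster;
        # it is defined earlier in this test file, outside this hunk.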
        signed_byte_creation_opts = opts_tuple[1] + ('PIXELTYPE=SIGNEDBYTE',)
        to_raster(
            lulc_array,
            lulc_path,
            raster_driver_creation_tuple=(
                opts_tuple[0], signed_byte_creation_opts
            )
        )

        args = {
            'workspace_dir': self.workspace_dir,
            'results_suffix': '',
            'lulc_path': lulc_path,
            'soil_group_path': soil_group_path,
            'precipitation_path': precipitation_path,
            'biophysical_table': biophysical_table_path,
            'adjust_retention_ratios': True,
            'retention_radius': 20,
            'road_centerlines_path': os.path.join(
                TEST_DATA, 'centerlines.gpkg'),
            'aggregate_areas_path': None,
            'replacement_cost': retention_cost
        }

        stormwater.execute(args)

        # assert that not all distances to roads are zero;
        # this problem resulted from not handling signed byte rasters
        # when calling `new_raster_from_base`
        road_distance_path = os.path.join(
            self.workspace_dir, 'intermediate', 'road_distance.tif')
        distance_is_zero = pygeoprocessing.raster_to_numpy_array(
            road_distance_path) == 0
        self.assertFalse(numpy.all(distance_is_zero))
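
For context, GDAL (before 3.7 added ``gdal.GDT_Int8``) represents signed byte
rasters as unsigned Byte bands plus a creation option, which is the case this
regression test builds; a minimal sketch of creating one directly:

    from osgeo import gdal

    driver = gdal.GetDriverByName('GTiff')
    raster = driver.Create(
        '/tmp/signed_byte.tif', 4, 4, 1, gdal.GDT_Byte,
        options=['PIXELTYPE=SIGNEDBYTE'])
    raster = None  # flush and close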

@@ -5,11 +5,13 @@ import os
import tempfile
import shutil
import logging
import logging.handlers
import threading
import warnings
import re
import glob
import textwrap
import queue

import numpy
from osgeo import gdal

@@ -457,6 +459,50 @@ class GDALWarningsLoggingTests(unittest.TestCase):

        self.assertEqual(len(messages), 1)

    def test_log_gdal_errors_bad_n_args(self):
        """utils: test error capture when number of args != 3."""
        from natcap.invest import utils

        log_queue = queue.Queue()
        log_queue_handler = logging.handlers.QueueHandler(log_queue)
        utils.LOGGER.addHandler(log_queue_handler)

        try:
            # 1 parameter given, 3 expected
            utils._log_gdal_errors('foo')
        finally:
            utils.LOGGER.removeHandler(log_queue_handler)

        record = log_queue.get()
        self.assertEqual(record.name, 'natcap.invest.utils')
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertIn(
            '_log_gdal_errors was called with an incorrect number',
            record.msg)

    def test_log_gdal_errors_missing_param(self):
        """utils: test error when specific parameters are missing."""
        from natcap.invest import utils

        log_queue = queue.Queue()
        log_queue_handler = logging.handlers.QueueHandler(log_queue)
        utils.LOGGER.addHandler(log_queue_handler)

        try:
            # Missing the third parameter, "err_msg"
            utils._log_gdal_errors(
                gdal.CE_Failure, 123,
                bad_param='bad param')  # param obviously bad
        finally:
            utils.LOGGER.removeHandler(log_queue_handler)

        record = log_queue.get()
        self.assertEqual(record.name, 'natcap.invest.utils')
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertIn(
            "_log_gdal_errors called without the argument 'err_msg'",
            record.msg)
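
``_log_gdal_errors`` appears to be this module's GDAL error handler, and GDAL
invokes a registered handler with exactly three arguments (error level, error
number, error message); the two tests above exercise the defensive logging for
calls that deviate from that signature. A sketch of typical registration
(assumed usage):

    from osgeo import gdal
    from natcap.invest import utils

    gdal.PushErrorHandler(utils._log_gdal_errors)
    try:
        gdal.Open('/nonexistent/path.tif')  # provokes a GDAL error
    finally:
        gdal.PopErrorHandler()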


class PrepareWorkspaceTests(unittest.TestCase):
    """Test Prepare Workspace."""

@@ -1436,8 +1482,7 @@ class ReclassifyRasterOpTests(unittest.TestCase):
        origin = (1180000, 690000)
        raster_path = os.path.join(self.workspace_dir, 'tmp_raster.tif')

        array = numpy.array(
            [[1, 1, 1], [2, 2, 2], [3, 3, 3]], dtype=numpy.int32)
        array = numpy.array([[1,1,1], [2,2,2], [3,3,3]], dtype=numpy.int32)

        pygeoprocessing.numpy_array_to_raster(
            array, -1, (1, -1), origin, projection_wkt, raster_path)

@@ -363,7 +363,7 @@ class WindEnergyRegressionTests(unittest.TestCase):
                'min_depth': 3,
                'max_depth': 180,
                'n_workers': -1
            }
        }

        return args


@@ -486,7 +486,7 @@ class WindEnergyRegressionTests(unittest.TestCase):

        raster_results = [
            'carbon_emissions_tons.tif',
            'levelized_cost_price_per_kWh.tif', 'npv_US_millions.tif']

        for raster_path in raster_results:
            model_array = pygeoprocessing.raster_to_numpy_array(

@@ -565,7 +565,7 @@ class WindEnergyRegressionTests(unittest.TestCase):
        args['discount_rate'] = 0.07
        args['price_table'] = True
        args['wind_schedule'] = os.path.join(
            SAMPLE_DATA, 'price_table_example.csv')
        args['wind_price'] = 0.187
        args['rate_change'] = 0.2
        args['avg_grid_distance'] = 4

@@ -588,7 +588,47 @@ class WindEnergyRegressionTests(unittest.TestCase):
            os.path.join(args['workspace_dir'], 'output', vector_path),
            os.path.join(REGRESSION_DATA, 'priceval', vector_path))

    def test_time_period_exceptoin(self):
    def test_field_error_missing_bio_param(self):
        """WindEnergy: test that ValueError is raised when missing bio param."""
        from natcap.invest import wind_energy

        # To test the raised exception, run on a set of data that was
        # created by hand and has no numerical validity; this exercises
        # the exception path quickly.
        args = {
            'workspace_dir': self.workspace_dir,
            'wind_data_path': os.path.join(
                REGRESSION_DATA, 'smoke', 'wind_data_smoke.csv'),
            'bathymetry_path': os.path.join(
                REGRESSION_DATA, 'smoke', 'dem_smoke.tif'),
            'global_wind_parameters_path': os.path.join(
                SAMPLE_DATA, 'global_wind_energy_parameters.csv'),
            'number_of_turbines': 80,
            'min_depth': 3,
            'max_depth': 200,
            'aoi_vector_path': os.path.join(
                REGRESSION_DATA, 'smoke', 'aoi_smoke.shp'),
            'land_polygon_vector_path': os.path.join(
                REGRESSION_DATA, 'smoke', 'landpoly_smoke.shp'),
            'min_distance': 0,
            'max_distance': 200000
        }

        # Create a stand-in turbine parameter CSV file that is missing
        # the 'cut_out_wspd' entry; this should raise the exception.
        tmp, file_path = tempfile.mkstemp(
            suffix='.csv', dir=args['workspace_dir'])
        os.close(tmp)
        data = {
            'hub_height': 80, 'cut_in_wspd': 4.0, 'rated_wspd': 12.5,
            'turbine_rated_pwr': 3.6, 'turbine_cost': 8.0
        }
        _create_vertical_csv(data, file_path)
        args['turbine_parameters_path'] = file_path

        self.assertRaises(ValueError, wind_energy.execute, args)
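
``_create_vertical_csv`` is defined elsewhere in this module; presumably it
writes one ``key,value`` row per parameter, something like this sketch:

    def _create_vertical_csv_sketch(data, file_path):
        with open(file_path, 'w') as csv_file:
            for key, value in data.items():
                csv_file.write(f'{key},{value}\n')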

    def test_time_period_exception(self):
        """WindEnergy: raise ValueError if 'time' and 'wind_sched' differ."""
        from natcap.invest import wind_energy


@@ -653,7 +693,8 @@ class WindEnergyRegressionTests(unittest.TestCase):
            SAMPLE_DATA, 'New_England_US_Aoi.shp')

        # Make up some Wind Data points that live outside AOI
        wind_data_csv = os.path.join(args['workspace_dir'], 'temp-wind-data.csv')
        wind_data_csv = os.path.join(
            args['workspace_dir'], 'temp-wind-data.csv')
        with open(wind_data_csv, 'w') as open_table:
            open_table.write('LONG,LATI,LAM,K,REF\n')
            open_table.write('-60.5,25.0,7.59,2.6,10\n')

@@ -58,14 +58,18 @@ class ModelLoggingTests(unittest.TestCase):
        model_args = {
            'raster': raster_path,
            'vector': vector_path,
            'not_a_gis_input': 'foobar'
            'not_a_gis_input': 'foobar',
            'blank_raster_path': '',
            'blank_vector_path': '',
        }

        args_spec = {
            'args': {
                'raster': {'type': 'raster'},
                'vector': {'type': 'vector'},
                'not_a_gis_input': {'type': 'freestyle_string'}
                'not_a_gis_input': {'type': 'freestyle_string'},
                'blank_raster_path': {'type': 'raster'},
                'blank_vector_path': {'type': 'vector'},
            }
        }
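
(The blank-path entries added above presumably verify that empty-string
values for raster and vector args are skipped, rather than opened as GIS
files, when model arguments are summarized for logging.)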
@@ -4,9 +4,11 @@ import functools
import warnings
import threading
import logging
import logging.handlers
import contextlib
import sys
import os
import queue
import requests
import time
import tempfile

@@ -1617,9 +1619,49 @@ class OpenWorkspaceTest(_QtTest):
        patch.assert_called()


@contextlib.contextmanager
def _capture_logging_messages(logger):
    """Capture log records from a logger and yield them as a list.

    This context manager also sets the root logger's level to
    ``logging.NOTSET`` and restores the former log level on completion of
    the context being managed.

    Args:
        logger (logging.Logger): A logger instance from python's logging
            system from which log records should be captured.

    Yields:
        records (list): A list of the captured ``logging.LogRecord``
            objects; it is populated when the context exits."""
    # The root logger has a default level of WARNING, but for tests we
    # want to capture everything.
    root_logger = logging.getLogger()
    root_logger_prior_level = root_logger.level
    root_logger.setLevel(logging.NOTSET)

    log_queue = queue.Queue()
    log_queue_handler = logging.handlers.QueueHandler(log_queue)
    log_queue_handler.setLevel(logging.NOTSET)  # capture all logging
    logger.addHandler(log_queue_handler)

    records = []
    try:
        yield records
    finally:
        # No matter what happens, always restore root logger level.
        root_logger.setLevel(root_logger_prior_level)

        try:
            while True:
                records.append(log_queue.get_nowait())
        except queue.Empty:
            logger.removeHandler(log_queue_handler)
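
Hypothetical usage of the helper above (``some_logger`` is a stand-in):

    with _capture_logging_messages(some_logger) as records:
        some_logger.warning('something happened')
    # records is populated once the context exits
    assert 'something happened' in records[0].msg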


class ExecutionTest(_QtTest):
    def test_executor_run(self):
        from natcap.invest.ui.execution import Executor
        from natcap.invest.ui.execution import LOGGER as execution_logger

        thread_event = threading.Event()

@@ -1634,7 +1676,8 @@ class ExecutionTest(_QtTest):
        executor = Executor(
            target=target,
            args=args,
            kwargs=kwargs)
            kwargs=kwargs,
            log_events=False)  # produce no extra logging

        self.assertEqual(executor.target, target)
        self.assertEqual(executor.args, args)

@@ -1643,17 +1686,22 @@ class ExecutionTest(_QtTest):
        # register the callback with the finished signal.
        executor.finished.connect(callback)

        executor.start()
        thread_event.set()
        executor.join()
        if self.qt_app.hasPendingEvents():
            self.qt_app.processEvents()
        with _capture_logging_messages(execution_logger) as log_records:
            executor.start()
            thread_event.set()
            executor.join()
            if self.qt_app.hasPendingEvents():
                self.qt_app.processEvents()

        callback.assert_called_once()
        target.assert_called_once()
        target.assert_called_with(*args, **kwargs)

        self.assertEqual(len(log_records), 0)

    def test_executor_exception(self):
        from natcap.invest.ui.execution import Executor
        from natcap.invest.ui.execution import LOGGER as execution_logger

        thread_event = threading.Event()

@@ -1678,11 +1726,13 @@ class ExecutionTest(_QtTest):
        # register the callback with the finished signal.
        executor.finished.connect(callback)

        executor.start()
        thread_event.set()
        executor.join()
        if self.qt_app.hasPendingEvents():
            self.qt_app.processEvents()
        with _capture_logging_messages(execution_logger) as log_records:
            executor.start()
            thread_event.set()
            executor.join()
            if self.qt_app.hasPendingEvents():
                self.qt_app.processEvents()

        callback.assert_called_once()
        target.assert_called_once()
        target.assert_called_with(*args, **kwargs)

@@ -1692,6 +1742,10 @@ class ExecutionTest(_QtTest):
            'Some demo exception')
        self.assertTrue(isinstance(executor.traceback, str))

        self.assertEqual(len(log_records), 2)
        self.assertIn('failed with exception', log_records[0].msg)
        self.assertIn('Execution finished', log_records[1].msg)

    def test_default_args(self):
        from natcap.invest.ui.execution import Executor
