Merge branch 'main' into feature/plugins

This commit is contained in:
Emily Soth 2025-02-19 10:31:10 -08:00
commit 08795c94ee
38 changed files with 2325 additions and 267 deletions

View File

@ -18,7 +18,7 @@ jobs:
name: PR main into release/**
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0
- run: git fetch origin +refs/tags/*:refs/tags/*
@ -71,11 +71,12 @@ jobs:
# the person who merged the PR that caused this commit to be
# created. Others could of course be assigned later.
gh pr create \
--head $GITHUB_REPOSITORY:$SOURCE_BRANCH \
--base $GITHUB_REPOSITORY:$RELEASE_BRANCH \
--head $SOURCE_BRANCH \
--base $RELEASE_BRANCH \
--reviewer "$PR_USERNAME" \
--assignee "$PR_USERNAME" \
--label "auto" \
--title "auto merge main into release/*" \
--body-file $PR_BODY_FILE || ERRORSPRESENT=$(($ERRORSPRESENT | $?))
done

View File

@ -21,7 +21,7 @@ env:
# nomkl: make sure numpy w/out mkl
# setuptools_scm: needed for versioning to work
CONDA_DEFAULT_DEPENDENCIES: python-build nomkl setuptools_scm
LATEST_SUPPORTED_PYTHON_VERSION: "3.12"
LATEST_SUPPORTED_PYTHON_VERSION: "3.13"
jobs:
check-syntax-errors:
@ -72,7 +72,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
python-version: [3.9, "3.10", "3.11", "3.12", "3.13"]
os: [windows-latest, macos-13]
steps:
- uses: actions/checkout@v4
@ -159,7 +159,7 @@ jobs:
needs: check-syntax-errors
strategy:
matrix:
python-version: [3.8, 3.9, "3.10", "3.11", "3.12"]
python-version: [3.9, "3.10", "3.11", "3.12", "3.13"]
os: [windows-latest, macos-13]
steps:
- uses: actions/checkout@v4
@ -198,6 +198,9 @@ jobs:
- uses: actions/upload-artifact@v4
with:
# NOTE: if you change this name, make sure the source distribution
# pattern defined in .github/workflows/release-part-2.yml will still
# grab it!
name: Source distribution ${{ matrix.os }} ${{ matrix.python-version }}
path: dist
@ -444,21 +447,20 @@ jobs:
WORKBENCH_BINARY=$(find "$(pwd)/workbench/dist" -type f -name 'invest_*.dmg' | head -n 1)
make WORKBENCH_BIN_TO_SIGN="$WORKBENCH_BINARY" codesign_mac
#- name: Sign binaries (Windows)
# if: github.event_name != 'pull_request' && matrix.os == 'windows-latest' # secrets not available in PR
# env:
# CERT_FILE: Stanford-natcap-code-signing-cert-expires-2024-01-26.p12
# CERT_PASS: ${{ secrets.WINDOWS_CODESIGN_CERT_PASS }}
# run: |
# # figure out the path to signtool.exe (it keeps changing with SDK updates)
# SIGNTOOL_PATH=$(find 'C:\\Program Files (x86)\\Windows Kits\\10' -type f -name 'signtool.exe*' | head -n 1)
# WORKBENCH_BINARY=$(find "$(pwd)/workbench/dist" -type f -name 'invest_*.exe' | head -n 1)
# make WORKBENCH_BIN_TO_SIGN="$WORKBENCH_BINARY" SIGNTOOL="$SIGNTOOL_PATH" codesign_windows
- name: Deploy artifacts to GCS
if: github.event_name != 'pull_request'
run: make deploy
# This relies on the file existing on GCP, so it must be run after `make
# deploy` is called.
- name: Queue windows binaries for signing
if: github.event_name != 'pull_request' && matrix.os == 'windows-latest' # secrets not available in PR
env:
ACCESS_TOKEN: ${{ secrets.CODESIGN_QUEUE_ACCESS_TOKEN }}
run: |
cd codesigning
bash enqueue-current-windows-installer.sh
- name: Upload workbench binary artifact
if: always()
uses: actions/upload-artifact@v4

View File

@ -1,6 +1,12 @@
name: Release (Part 1 of 2)
on:
schedule:
# every tuesday at 10:20am PST (6:20pm UTC)
# cron doesn't support "first tuesday of the month", so we will use github
# actions syntax below to skip tuesdays that aren't in the first week.
# it is recommended not to start on the hour to avoid peak traffic
- cron: "20 18 * * 2"
workflow_dispatch:
inputs:
version:
@ -16,13 +22,43 @@ env:
jobs:
start-release:
if: github.repository_owner == 'natcap'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
# skip scheduled runs that aren't the first tuesday of the month
- name: Check day of month
run: |
if [[ $GITHUB_EVENT_NAME == "workflow_dispatch" || $((10#$(date +%d))) -le 7 ]]; then
echo "RUN=true" >> $GITHUB_ENV
else
echo "RUN=false" >> $GITHUB_ENV
fi
- uses: actions/checkout@v4
if: env.RUN == 'true'
with:
fetch-depth: 0 # fetch entire history, for versioning
token: ${{ secrets.AUTORELEASE_BOT_PAT }}
- name: Install dependencies
if: env.RUN == 'true'
run: pip install rst2html5 setuptools_scm
- name: Get current version number (scheduled runs only)
if: env.RUN == 'true' && github.event_name == 'schedule'
run: echo "CURRENT_VERSION=$(python -m setuptools_scm)" >> $GITHUB_ENV
- name: Get next bugfix version number (scheduled runs only)
if: env.RUN == 'true' && github.event_name == 'schedule'
shell: python
run: |
import os
major, minor, bugfix, *_ = os.environ['CURRENT_VERSION'].split('.')
with open(os.environ['GITHUB_ENV'], 'a') as env_file:
env_file.write(f'VERSION={major}.{minor}.{int(bugfix) + 1}')
- name: Configure git
if: env.RUN == 'true'
run: |
git config user.name "GitHub Actions"
git config user.email "<>"
@ -30,6 +66,7 @@ jobs:
# Members of the natcap software team can push to the autorelease branch on
# natcap/invest; this branch is a special case for our release process.
- name: Create autorelease branch
if: env.RUN == 'true'
run: git checkout -b "$AUTORELEASE_BRANCH"
# Replace
@ -46,6 +83,7 @@ jobs:
# X.X.X (XXXX-XX-XX)
# ------------------
- name: Update HISTORY.rst
if: env.RUN == 'true'
run: |
HEADER="$VERSION ($(date '+%Y-%m-%d'))"
HEADER_LENGTH=${#HEADER}
@ -54,19 +92,19 @@ jobs:
"s/Unreleased Changes\n------------------/..\n Unreleased Changes\n ------------------\n\n${HEADER}\n${UNDERLINE}/g" \
HISTORY.rst
- name: Install dependencies
run: pip install rst2html5
- name: Generate changelog.html
if: env.RUN == 'true'
run: rst2html5 HISTORY.rst workbench/changelog.html
- name: Update package.json version
if: env.RUN == 'true'
uses: BellCubeDev/update-package-version-by-release-tag@v2
with:
version: ${{ inputs.version }}
package-json-path: workbench/package.json
- name: Commit updated HISTORY.rst, changelog.html, and package.json
if: env.RUN == 'true'
run: |
git add HISTORY.rst
git add workbench/changelog.html
@ -74,11 +112,13 @@ jobs:
git commit -m "Committing the $VERSION release."
- name: Tag and push
if: env.RUN == 'true'
run: |
git tag $VERSION
git push --atomic origin $AUTORELEASE_BRANCH $VERSION
- name: Find actions run for the version tag
if: env.RUN == 'true'
run: |
# wait a few seconds to make sure the actions run exists before querying it
sleep 5
@ -90,6 +130,7 @@ jobs:
--jq .[].url)" >> $GITHUB_ENV
- name: Create a PR from the autorelease branch into main
if: env.RUN == 'true'
run: |
gh pr create \
--base main \

View File

@ -22,7 +22,7 @@ jobs:
if: startsWith(github.head_ref, 'autorelease') && github.event.pull_request.merged != true
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Extract version from autorelease branch name
run: echo "VERSION=$(echo ${{ github.head_ref }} | cut -c 13-)" >> $GITHUB_ENV
@ -39,7 +39,7 @@ jobs:
if: ${{ ! github.head_ref || (startsWith(github.head_ref, 'autorelease') && github.event.pull_request.merged == true) }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install dependencies
run: pip install twine
@ -72,15 +72,23 @@ jobs:
rm -rf artifacts/Wheel*
# download each artifact separately so that the command will fail if any is missing
for artifact in Workbench-Windows-binary \
Workbench-macOS-binary \
for artifact in Workbench-macOS-binary \
InVEST-sample-data \
InVEST-user-guide \
"Source distribution"
InVEST-user-guide
do
gh run download $RUN_ID --dir artifacts --name "$artifact"
done
# download the signed windows workbench file from GCS
wget --directory-prefix=artifacts https://storage.googleapis.com/releases.naturalcapitalproject.org/invest/${{ env.VERSION }}/workbench/invest_${{ env.VERSION }}_workbench_win32_x64.exe
# We build one sdist per combination of OS and python version, so just
# download and unzip all of them into an sdists directory so we can
# just grab the first one. This approach is more flexible to changes
# in OS and python versions than just statically defining the artifact name.
gh run download $RUN_ID --dir sdists --pattern "Source*"
cp "$(find sdists -name '*.tar.gz' -print -quit)" artifacts/
- name: Create Github release
run: |
# Copy the history notes for this version into the release message

View File

@ -35,8 +35,40 @@
.. :changelog:
Unreleased Changes
------------------
* General
* Fixed an issue where a user's PROJ_DATA environment variable could
trigger a RuntimeError about a missing proj.db file.
https://github.com/natcap/invest/issues/1742
* Now testing and building against Python 3.13.
No longer testing and building with Python 3.8, which reached EOL.
https://github.com/natcap/invest/issues/1755
* InVEST's windows binaries are now distributed once again with a valid
signature, signed by Stanford University.
https://github.com/natcap/invest/issues/1580
* Annual Water Yield
* Fixed an issue where the model would crash if the valuation table was
provided, but the demand table was not. Validation will now warn about
this, and the ``MODEL_SPEC`` has been improved to reflect that this table
is now required when doing valuation.
https://github.com/natcap/invest/issues/1769
* Carbon
* Updated styling of the HTML report generated by the carbon model, for
visual consistency with the Workbench (`InVEST #1732
<https://github.com/natcap/invest/issues/1732>`_).
* Urban Cooling
* Updated the documentation for the ``mean_t_air`` attribute of the
``buildings_with_stats.shp`` output to clarify how the value is
calculated. https://github.com/natcap/invest/issues/1746
* Wind Energy
* Fixed a bug that could cause the Workbench to crash when running the Wind
Energy model with ``Taskgraph`` logging set to ``DEBUG`` (`InVEST #1497
<https://github.com/natcap/invest/issues/1497>`_).
3.14.3 (2024-12-19)
-------------------
* General
* InVEST has been updated to build against numpy 2.
https://github.com/natcap/invest/issues/1641
@ -97,6 +129,9 @@ Unreleased Changes
* Added support for zero padding in month numbers in ET and precipitation
file names (i.e., users can now name their file Precip_01.tif).
(https://github.com/natcap/invest/issues/1166)
* Fixed a bug where ``numpy.nan`` pixel values would not be correctly
detected as nodata in local recharge and baseflow routing functions.
(https://github.com/natcap/invest/issues/1705)
* Urban Flood Risk
* Fields present on the input AOI vector are now retained in the output.
(https://github.com/natcap/invest/issues/1600)

View File

@ -10,7 +10,7 @@ GIT_TEST_DATA_REPO_REV := 324abde73e1d770ad75921466ecafd1ec6297752
GIT_UG_REPO := https://github.com/natcap/invest.users-guide
GIT_UG_REPO_PATH := doc/users-guide
GIT_UG_REPO_REV := 5ee3616d4549baf3b1e44e0fcef485145389e29a
GIT_UG_REPO_REV := fd3194f35bc1652a93cf1c0241f98be1e7c9d43d
ENV = "./env"
ifeq ($(OS),Windows_NT)
@ -356,10 +356,8 @@ codesign_mac:
codesign --timestamp --verbose --sign Stanford $(WORKBENCH_BIN_TO_SIGN)
codesign_windows:
$(GSUTIL) cp gs://stanford_cert/$(CERT_FILE) $(BUILD_DIR)/$(CERT_FILE)
"$(SIGNTOOL)" sign -fd SHA256 -f $(BUILD_DIR)/$(CERT_FILE) -p $(CERT_PASS) $(WORKBENCH_BIN_TO_SIGN)
"$(SIGNTOOL)" timestamp -tr http://timestamp.sectigo.com -td SHA256 $(WORKBENCH_BIN_TO_SIGN)
$(RM) $(BUILD_DIR)/$(CERT_FILE)
@echo "Installer was signed with signtool"
deploy:

21
codesigning/Makefile Normal file
View File

@ -0,0 +1,21 @@
.PHONY: deploy-cloudfunction deploy-worker
deploy-cloudfunction:
gcloud functions deploy \
--project natcap-servers \
codesigning-queue \
--memory=256Mi \
--trigger-http \
--gen2 \
--region us-west1 \
--allow-unauthenticated \
--entry-point main \
--runtime python312 \
--source gcp-cloudfunc/
# NOTE: This must be executed from a computer that has SSH access to ncp-inkwell.
deploy-worker:
cd signing-worker && ansible-playbook \
--ask-become-pass \
--inventory-file inventory.ini \
playbook.yml

74
codesigning/README.md Normal file
View File

@ -0,0 +1,74 @@
# InVEST Codesigning Service
This directory contains all of the functional code and configuration (minus a
few secrets) that are needed to deploy our code-signing service. There are
three key components to this service:
1. A cloud function (`gcp-cloudfunc/`) that handles a google cloud
storage-backed cloud function that operates as a high-latency queue.
2. A script (`enqueue-binary.py`) that will enqueue a binary that already
exists on one of our GCS buckets.
3. A `systemd` service that runs on a debian:bookworm machine and periodically
polls the cloud function to dequeue the next item to sign.
## Deploying the Cloud Function
The necessary `gcloud` deployment configuration can be executed with
```bash
$ make deploy-cloudfunction
```
### Secrets
The current deployment process requires you to manually create an environment
variable, ``ACCESS_TOKEN``, that contains the secret token shared by the cloud
function, systemd service and enqueue script.
## Deploying the Systemd Service
To deploy the systemd service, you will need to be on a computer that has ssh
access to `ncp-inkwell`, which is a computer that has a yubikey installed in
it. This computer is assumed to run debian:bookworm at this time. To deploy
(non-secret) changes to ncp-inkwell, run this in an environment where
`ansible-playbook` is available (`pip install ansible` to install):
```bash
$ make deploy-worker
```
### Secrets
The systemd service requires several secrets to be available in the codesigning
workspace, which is located at `/opt/natcap-codesign`:
* `/opt/natcap-codesign/pass.txt` is a plain text file containing only the PIN
for the yubikey
* `/opt/natcap-codesign/access_token.txt` is a plain text file containing the
access token shared with the cloud function, systemd service and enqueue script.
* `/opt/natcap-codesign/slack_token.txt` is a plain text file containing the
slack token used to post messages to our slack workspace.
* `/opt/natcap-codesign/natcap-servers-1732552f0202.json` is a GCP service
account key used to authenticate to google cloud storage. This file must be
available in the `gcp-cloudfunc/` directory at the time of deployment.
## Future Work
### Authenticate to the function with Identity Federation
The cloud function has access controlled by a secret token, which is not ideal.
Instead, we should be using github/GCP identity federation to control access.
### Trigger the function with GCS Events
GCP Cloud Functions have the ability to subscribe to bucket events, which
should allow us to subscribe very specifically to just those `finalize` events
that apply to the Windows workbench binaries. Doing so will require reworking this cloud function into 2 cloud functions:
1. An endpoint for ncp-inkwell to poll for the next binary to sign
2. A cloud function that subscribes to GCS bucket events and enqueues the binary to sign.
Relevant docs include:
* https://cloud.google.com/functions/docs/writing/write-event-driven-functions#cloudevent-example-python

View File

@ -0,0 +1,28 @@
"""Enqueue a windows binary for signing.

To call this script, you need to set the ACCESS_TOKEN environment variable from
the software team secrets store.

Example invocation:

    $ ACCESS_TOKEN=abcs1234 python3 enqueue-binary.py <gs:// uri to binary on gcs>
"""
import os
import sys

import requests

# The queue service only accepts public https URLs, so rewrite the gs:// URI
# given on the command line into its storage.googleapis.com equivalent.
DATA = {
    'token': os.environ['ACCESS_TOKEN'],
    'action': 'enqueue',
    'url': sys.argv[1].replace(
        'gs://', 'https://storage.googleapis.com/'),
}
response = requests.post(
    'https://us-west1-natcap-servers.cloudfunctions.net/codesigning-queue',
    json=DATA
)
# Any 4xx/5xx means the enqueue was rejected; print the server's explanation
# and exit nonzero so a calling script/CI job fails visibly.
if response.status_code >= 400:
    print(response.text)
    sys.exit(1)

View File

@ -0,0 +1,13 @@
#!/usr/bin/env sh
#
# Run this script to enqueue the windows binary for this current version of the
# InVEST windows workbench installer for code signing.
#
# NOTE: this script must be run from the directory containing this script.

# Fail fast: without this, a failure in setuptools_scm or make would build a
# garbage URL from empty variables and enqueue it anyway.
set -e

version=$(python -m setuptools_scm)
url_base=$(make -C .. --no-print-directory print-DIST_URL_BASE | awk ' { print $3 } ')
url="${url_base}/workbench/invest_${version}_workbench_win32_x64.exe"

echo "Enqueuing URL ${url}"
python enqueue-binary.py "${url}"

View File

@ -0,0 +1,180 @@
import contextlib
import datetime
import json
import logging
import os
import time
from urllib.parse import unquote
import functions_framework
import google.cloud.logging # pip install google-cloud-logging
import requests
from flask import jsonify
from google.cloud import storage # pip install google-cloud-storage
GOOGLE_PREFIX = 'https://storage.googleapis.com'
CODESIGN_DATA_BUCKET = 'natcap-codesigning'
LOG_CLIENT = google.cloud.logging.Client()
LOG_CLIENT.setup_logging()
@contextlib.contextmanager
def get_lock():
    """Acquire a GCS-backed mutex around queue.json modifications.

    The lock is an object (``mutex.lock``) in the codesigning bucket.
    Creating it with ``if_generation_match=0`` succeeds only when the object
    does not already exist, so acquisition is atomic on the GCS side.

    Raises:
        RuntimeError: if the lock could not be acquired after 100 attempts.

    Yields:
        ``None`` once the lock is held; the lock object is deleted on exit.
    """
    # Shipped with google-cloud-storage; imported locally to keep the
    # module's top-level imports unchanged.
    from google.api_core.exceptions import PreconditionFailed

    storage_client = storage.Client()
    bucket = storage_client.bucket(CODESIGN_DATA_BUCKET)
    lockfile = bucket.blob('mutex.lock')
    lock_obtained = False
    n_tries = 100
    for _ in range(n_tries):
        try:
            # BUG FIX: the old ``if not lockfile.generation`` check never
            # blocked -- ``generation`` is None on a fresh blob() handle that
            # was never reload()-ed -- and was a check-then-write race anyway.
            # if_generation_match=0 makes GCS itself reject the upload when
            # the lock object already exists.
            lockfile.upload_from_string(
                f"Lock acquired {datetime.datetime.now().isoformat()}",
                if_generation_match=0)
            lock_obtained = True
            break
        except PreconditionFailed:
            # Someone else holds the lock; back off briefly and retry.
            time.sleep(0.1)

    if not lock_obtained:
        raise RuntimeError(f'Could not obtain lock after {n_tries} tries')

    try:
        yield
    finally:
        lockfile.delete()
@functions_framework.http
def main(request):
    """Handle requests to this GCP Cloud Function.

    All requests must be POST requests and have a JSON body with the following
    attributes:

        * token: a secret token that matches the ACCESS_TOKEN environment
          variable that is defined in the cloud function configuration.
        * action: either 'enqueue' or 'dequeue'

    If the action is 'enqueue', the request must also have a 'url' attribute.
    The 'url' attribute, when provided, must be a URL to a file that meets
    these requirements:

        * The URL must be a publicly accessible URL
        * The URL must be a file that ends in '.exe'
        * The URL must be located in either the releases bucket, or else
          in the dev builds bucket. It doesn't necessarily have to be an
          InVEST binary.
        * The URL must be a file that is not older than June 1, 2024
        * The URL must be a file that is not already in the queue
        * The URL should be a file that is not already signed (if the file has
          already been signed, its signature will be overwritten)

    Returns:
        A flask response built with ``jsonify``, usually paired with an
        explicit HTTP status code.
    """
    # Reject non-POSTs before parsing the body: GET requests carry no JSON
    # payload, and the old order could fail while parsing before this check.
    if request.method != 'POST':
        logging.info('Rejecting request due to invalid HTTP method')
        return jsonify('Invalid request method'), 405

    # silent=True returns None instead of raising on a missing/invalid body;
    # .get() avoids a KeyError (-> HTTP 500) when 'token' is absent.
    data = request.get_json(silent=True) or {}
    if data.get('token') != os.environ['ACCESS_TOKEN']:
        logging.info('Rejecting request due to invalid token')
        return jsonify('Invalid token'), 403

    storage_client = storage.Client()
    bucket = storage_client.bucket(CODESIGN_DATA_BUCKET)

    logging.debug(f'Data POSTed: {data}')
    if data.get('action') == 'dequeue':
        with get_lock():
            queuefile = bucket.blob('queue.json')
            queue_dict = json.loads(queuefile.download_as_string())
            try:
                next_file_url = queue_dict['queue'].pop(0)
            except IndexError:
                # No items in the queue!
                logging.info('No binaries are currently queued for signing')
                return jsonify('No items in the queue'), 204
            queuefile.upload_from_string(json.dumps(queue_dict))

        data = {
            'https-url': next_file_url,
            'basename': os.path.basename(next_file_url),
            'gs-uri': unquote(next_file_url.replace(
                f'{GOOGLE_PREFIX}/', 'gs://')),
        }
        logging.info(f'Dequeued {next_file_url}')
        return jsonify(data)

    elif data.get('action') == 'enqueue':
        url = data.get('url')
        logging.info(f'Attempting to enqueue url {url}')
        if not url or not url.endswith('.exe'):
            logging.info("Rejecting URL because it doesn't end in .exe")
            return jsonify('Invalid URL to sign'), 400

        if not url.startswith(GOOGLE_PREFIX):
            logging.info(
                f'Rejecting URL because it does not start with {GOOGLE_PREFIX}')
            return jsonify('Invalid host'), 400

        if not url.startswith((
                f'{GOOGLE_PREFIX}/releases.naturalcapitalproject.org/',
                f'{GOOGLE_PREFIX}/natcap-dev-build-artifacts/')):
            logging.info('Rejecting URL because the bucket is incorrect')
            return jsonify("Invalid target bucket"), 400

        # Remove http character quoting
        url = unquote(url)

        codesign_bucket = storage_client.bucket(CODESIGN_DATA_BUCKET)

        # If the file does not exist at this URL, reject it.
        response = requests.head(url)
        if response.status_code >= 400:
            logging.info('Rejecting URL because it does not exist')
            return jsonify('Requested file does not exist'), 403

        # If the file is too old, reject it. Trying to avoid a
        # denial-of-service by invoking the service with very old files.
        # I just pulled June 1 out of thin air as a date that is a little while
        # ago, but not so long ago that we could suddenly have many files
        # enqueued.
        mday, mmonth, myear = response.headers['Last-Modified'].split(' ')[1:4]
        modified_time = datetime.datetime.strptime(
            ' '.join((mday, mmonth, myear)), '%d %b %Y')
        if modified_time < datetime.datetime(year=2024, month=6, day=1):
            logging.info('Rejecting URL because it is too old')
            return jsonify('File is too old'), 400

        response = requests.head(f'{url}.signature')
        if response.status_code != 404:
            logging.info('Rejecting URL because it has already been signed.')
            return jsonify('File has already been signed'), 204

        with get_lock():
            # Since the file has not already been signed, add the file to the
            # queue
            queuefile = codesign_bucket.blob('queue.json')
            if not queuefile.exists():
                queue_dict = {'queue': []}
            else:
                queue_dict = json.loads(queuefile.download_as_string())

            if url not in queue_dict['queue']:
                queue_dict['queue'].append(url)
            else:
                # BUG FIX: the old jsonify('...', 200, 'application/json')
                # serialized all three arguments into a single JSON array with
                # an implicit 200 status; return a (body, status) tuple.
                return jsonify('File is already in the queue'), 200

            queuefile.upload_from_string(json.dumps(queue_dict))

        logging.info(f'Enqueued {url}')
        return jsonify("OK"), 200
    else:
        return jsonify('Invalid action request'), 405

View File

@ -0,0 +1,5 @@
google-cloud-storage
google-cloud-logging
functions-framework==3.*
flask
requests

5
codesigning/signing-worker/.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
# This key is copied from GCP. I've added it to the .gitignore to try to
# prevent it from getting committed and pushed to git, while still allowing it
# to be where ansible expects the key to be so ansible can copy the file to the
# remote server.
natcap-servers-1732552f0202.json

View File

@ -0,0 +1,5 @@
# This is an ansible inventory file. If we had more hostnames to list here, we
# could group them into functional groups (e.g. codesign-workers).
[ncp-inkwell]
ncp-inkwell

View File

@ -0,0 +1,265 @@
#!/usr/bin/env python3
"""Service script to sign InVEST windows binaries."""
import logging
import os
import shutil
import subprocess
import sys
import textwrap
import time
import traceback
import pexpect # apt install python3-pexpect
import requests # apt install python3-requests
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Path to the exported code-signing certificate chain, given on the CLI.
CERTIFICATE = sys.argv[1]
FILE_DIR = os.path.dirname(__file__)

# Shared secret for the codesigning-queue cloud function.
QUEUE_TOKEN_FILE = os.path.join(FILE_DIR, "access_token.txt")
with open(QUEUE_TOKEN_FILE) as token_file:
    ACCESS_TOKEN = token_file.read().strip()

# Token used to post status notifications to slack.
SLACK_TOKEN_FILE = os.path.join(FILE_DIR, "slack_token.txt")
with open(SLACK_TOKEN_FILE) as token_file:
    SLACK_ACCESS_TOKEN = token_file.read().strip()

# These templates are rendered with .format(filename=..., url=...).
# BUG FIX: the templates lacked a {filename} placeholder, so the filename
# kwarg passed by every caller was silently ignored.
SLACK_NOTIFICATION_SUCCESS = textwrap.dedent(
    """\
    :lower_left_fountain_pen: Successfully signed and uploaded `{filename}` to
    <{url}|google cloud>
    """)

SLACK_NOTIFICATION_ALREADY_SIGNED = textwrap.dedent(
    """\
    :lower_left_fountain_pen: `{filename}` is already signed!
    <{url}|google cloud>
    """)

SLACK_NOTIFICATION_FAILURE = textwrap.dedent(
    """\
    :red-flag: Something went wrong while signing {filename}:
    ```
    {traceback}
    ```
    Please investigate on ncp-inkwell using:
    ```
    sudo journalctl -u natcap-codesign.service
    ```
    """)
def post_to_slack(message):
    """Post a message to the slack channel.

    Args:
        message (str): The message to post.

    Raises:
        requests.HTTPError: if slack rejects the request at the HTTP level.

    Returns:
        ``None``
    """
    resp = requests.post(
        "https://slack.com/api/chat.postMessage",
        headers={
            "Authorization": f"Bearer {SLACK_ACCESS_TOKEN}",
            "Content-Type": "application/json; charset=utf-8"
        },
        json={
            "channel": "CESG428BH",  # sw-invest
            "text": message
        })
    # NOTE(review): raise_for_status only catches transport-level errors;
    # slack reports API failures as {"ok": false} with HTTP 200, which is not
    # checked here -- confirm whether that is acceptable.
    resp.raise_for_status()
def get_from_queue():
    """Get an item to sign from the queue.

    Returns:
        ``None`` if there are no items in the queue (the service signals this
        with HTTP 204), the JSON response dict otherwise.
    """
    response = requests.post(
        "https://us-west1-natcap-servers.cloudfunctions.net/codesigning-queue",
        headers={"Content-Type": "application/json"},
        json={
            "token": ACCESS_TOKEN,
            "action": "dequeue"
        })
    if response.status_code == 204:
        # 204 No Content is the queue's "nothing to sign" signal.
        return None
    else:
        return response.json()
def download_file(url):
    """Download an arbitrarily large file to the current directory.

    Adapted from https://stackoverflow.com/a/16696317

    Args:
        url (str): The URL to download.

    Returns:
        str: The basename of the downloaded file (written to the current
        working directory).
    """
    local_filename = url.split('/')[-1]
    # Stream the response so the whole binary never has to fit in memory.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return local_filename
def upload_to_bucket(filename, path_on_bucket):
    """Upload a file to a GCS bucket.

    Shells out to ``gsutil``, which is assumed to be installed and
    authenticated on the worker.

    Args:
        filename (str): The local file to upload.
        path_on_bucket (str): The path to the file on the GCS bucket, including
            the ``gs://`` prefix.

    Raises:
        subprocess.CalledProcessError: if the gsutil copy fails.

    Returns:
        ``None``
    """
    subprocess.run(['gsutil', 'cp', filename, path_on_bucket], check=True)
def sign_file(file_to_sign):
    """Sign a local .exe file.

    Uses ``osslsigncode`` to sign the file using the private key stored on a
    Yubikey, and the corresponding certificate that has been exported from the
    PIV slot 9c.

    Args:
        file_to_sign (str): The local filepath to the file to sign.

    Returns:
        ``None``
    """
    signed_file = f"{file_to_sign}.signed"
    # pass.txt holds the Yubikey PIN; it is handed to osslsigncode via
    # -readpass and echoed again below when the PKCS#11 engine prompts
    # interactively (the prompt is why pexpect is used instead of subprocess).
    pass_file = os.path.join(FILE_DIR, 'pass.txt')
    signcode_command = textwrap.dedent(f"""\
        osslsigncode sign \
        -pkcs11engine /usr/lib/x86_64-linux-gnu/engines-3/pkcs11.so \
        -pkcs11module /usr/lib/x86_64-linux-gnu/libykcs11.so \
        -key "pkcs11:id=%02;type=private" \
        -certs {CERTIFICATE} \
        -h sha256 \
        -ts http://timestamp.sectigo.com \
        -readpass {pass_file} \
        -verbose \
        -in {file_to_sign} \
        -out {signed_file}""")
    process = pexpect.spawnu(signcode_command)
    process.expect('Enter PKCS#11 key PIN for Private key for Digital Signature:')
    with open(pass_file) as passfile:
        process.sendline(passfile.read().strip())
    # print remainder of program output for our logging.
    print(process.read())
    # Replace the unsigned binary with the signed one, in place.
    shutil.move(signed_file, file_to_sign)
def note_signature_complete(local_filepath, target_gs_uri):
    """Create a small file next to the signed file to indicate signature.

    Args:
        local_filepath (str): Local path to the signed file; the marker's
            contents are osslsigncode's verification output for this file.
        target_gs_uri (str): The GCS URI of the signed file; the marker is
            uploaded alongside it as ``<target_gs_uri>.signature``.

    Returns:
        ``None``
    """
    # Using osslsigncode to verify the output always fails for me, even though
    # the signature is clearly valid when checked on Windows.
    process = subprocess.run(
        ['osslsigncode', 'verify', '-in', local_filepath], check=False,
        capture_output=True)
    temp_filepath = f'/tmp/{os.path.basename(local_filepath)}.signed'
    with open(temp_filepath, 'w') as signature_file:
        signature_file.write(process.stdout.decode())
    try:
        # Upload alongside the original file
        subprocess.run(
            ['gsutil', 'cp', temp_filepath, f'{target_gs_uri}.signature'],
            check=True)
    finally:
        os.remove(temp_filepath)
def has_signature(filename):
    """Check if a file is already signed.

    Args:
        filename (str): The local filepath to the file to check.

    Returns:
        ``True`` if the file is signed, ``False`` otherwise.
    """
    result = subprocess.run(
        ['osslsigncode', 'verify', '-in', filename],
        capture_output=True, check=False)
    # Either stream may be None; only decode the ones actually present.
    combined_output = ''.join(
        stream.decode() for stream in (result.stdout, result.stderr)
        if stream is not None)
    return 'No signature found' not in combined_output
def main():
    """Poll the signing queue forever, processing one binary per minute.

    Each pass dequeues at most one URL from the codesigning queue, downloads
    the binary, signs it if it is not already signed, uploads the result back
    to GCS, and posts a status message to slack.  Errors are reported to
    slack and the loop keeps running.
    """
    while True:
        # Defined before the try so the except block can safely tell whether
        # anything was dequeued (previously this could NameError and mask the
        # real exception when get_from_queue itself failed).
        file_to_sign = None
        try:
            file_to_sign = get_from_queue()
            if file_to_sign is None:
                LOGGER.info('No items in the queue')
            else:
                LOGGER.info(
                    f"Dequeued and downloading {file_to_sign['https-url']}")
                filename = download_file(file_to_sign['https-url'])
                LOGGER.info(f"Checking if {filename} is already signed")
                if has_signature(filename):
                    LOGGER.info(f"{filename} is already signed, skipping")
                    post_to_slack(
                        SLACK_NOTIFICATION_ALREADY_SIGNED.format(
                            filename=filename,
                            url=file_to_sign['https-url']))
                    note_signature_complete(
                        filename, file_to_sign['gs-uri'])
                else:
                    LOGGER.info(f"Signing {filename}")
                    sign_file(filename)
                    LOGGER.info(
                        f"Uploading signed file to {file_to_sign['gs-uri']}")
                    upload_to_bucket(filename, file_to_sign['gs-uri'])
                    LOGGER.info(
                        f"Adding {file_to_sign['https-url']} to signed "
                        "files list")
                    note_signature_complete(
                        filename, file_to_sign['gs-uri'])
                    post_to_slack(
                        SLACK_NOTIFICATION_SUCCESS.format(
                            filename=filename,
                            url=file_to_sign['https-url']))
                    LOGGER.info("Signing complete.")
                # Clean up the downloaded binary in both branches (it was
                # previously leaked when the file was already signed).
                LOGGER.info(f"Removing {filename}")
                os.remove(filename)
        except Exception as e:
            LOGGER.exception(f"Unexpected error signing file: {e}")
            post_to_slack(
                SLACK_NOTIFICATION_FAILURE.format(
                    filename=(file_to_sign['https-url']
                              if file_to_sign else 'unknown'),
                    traceback=traceback.format_exc()))
        time.sleep(60)

View File

@ -0,0 +1,33 @@
# Systemd service for debian:bookworm for signing InVEST windows binaries.
#
# To install this service, copy this onto the host as /etc/systemd/system/natcap-codesign.service
#
# To use, run (for example):
# # On modifying the service file, run:
# $ sudo systemctl daemon-reload
#
# # enable the service
# $ sudo systemctl enable natcap-codesign.service
#
# # start the service
# $ sudo systemctl start natcap-codesign
#
# # check the service status
# $ sudo systemctl status natcap-codesign
#
# This service is built to run in the foreground.
#
# See https://wiki.debian.org/systemd/Services for background info about systemd services.
[Unit]
Description=NatCap Code Signing for Windows EXE Binaries

[Service]
# User=, Group= and WorkingDirectory= are [Service] directives; systemd
# ignores (and warns about) them when they appear under [Unit].
User=natcap-codesign
Group=natcap-codesign
WorkingDirectory=/tmp
# Run in the foreground
Type=simple
Restart=always
ExecStart=python3 /opt/natcap-codesign/natcap-codesign.py /opt/natcap-codesign/codesign-cert-chain.pem

View File

@ -0,0 +1,112 @@
---
- name: Set up everything needed on NCP-Inkwell
hosts: all
become: true
become_method: sudo
tasks:
- name: Install GCP SDK dependencies
ansible.builtin.apt:
update_cache: true
pkg:
- apt-transport-https
- ca-certificates
- gnupg
- curl
- name: Download the Google Cloud SDK package repository signing key
ansible.builtin.shell:
cmd: curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
creates: /usr/share/keyrings/cloud.google.gpg
- name: Add Google Cloud SDK package repository source
ansible.builtin.apt_repository:
update_cache: true
filename: google-cloud-sdk.list
repo: "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main"
- name: Install packages
ansible.builtin.apt:
update_cache: true
pkg:
- python3
- python3-pexpect
- python3-requests
- wget
- vim-nox
- yubico-piv-tool
- libengine-pkcs11-openssl
- ykcs11
- libssl-dev
- google-cloud-sdk
- google-cloud-cli
- yubikey-manager
- name: Add bookworm-backports repository
ansible.builtin.apt_repository:
update_cache: true
repo: "deb http://deb.debian.org/debian {{ ansible_distribution_release }}-backports main"
filename: bookworm-backports.list
- name: Install osslsigncode from backports
ansible.builtin.apt:
update_cache: true
default_release: "{{ ansible_distribution_release }}-backports"
pkg:
# The normal debian:bookworm repos have osslsigncode 2.5, which has a
# bug in it that prevents it from signing our binaries. This was
# fixed in osslsigncode 2.6. The version available in
# bookworm-backports is 2.9. The issue (and solution) was similar to
# https://stackoverflow.com/a/78308879
- osslsigncode
- name: Create the codesign directory
ansible.builtin.file:
state: directory
path: /opt/natcap-codesign
- name: Install the certificate
ansible.builtin.shell:
cmd: ykman piv certificates export 9c /opt/natcap-codesign/codesign-cert-chain.pem
creates: /opt/natcap-codesign/codesign-cert-chain.pem
- name: Create codesigning group
ansible.builtin.group:
name: natcap-codesign
state: present
- name: Create codesigning user
ansible.builtin.user:
name: natcap-codesign
group: natcap-codesign
shell: /bin/bash
createhome: true
- name: Install the service account key
ansible.builtin.copy:
src: natcap-servers-1732552f0202.json
dest: /opt/natcap-codesign/natcap-servers-1732552f0202.json
mode: "0600"
- name: Set up application credentials
ansible.builtin.shell:
cmd: gcloud auth activate-service-account --key-file=/opt/natcap-codesign/natcap-servers-1732552f0202.json
- name: Install codesigning python script
ansible.builtin.copy:
src: natcap-codesign.py
dest: /opt/natcap-codesign/natcap-codesign.py
mode: "0755"
- name: Install the codesign service
ansible.builtin.copy:
src: natcap-codesign.service
dest: /etc/systemd/system/natcap-codesign.service
mode: "0644"
- name: Enable the natcap-codesign service
ansible.builtin.systemd_service:
name: natcap-codesign
daemon_reload: true # reload in case there are any config changes
state: restarted
enabled: true

View File

@ -3,6 +3,7 @@ import platform
import sys
os.environ['PROJ_LIB'] = os.path.join(sys._MEIPASS, 'proj')
os.environ['PROJ_DATA'] = os.path.join(sys._MEIPASS, 'proj')
if platform.system() == 'Darwin':
# Rtree will look in this directory first for libspatialindex_c.dylib.

View File

@ -2,7 +2,7 @@
name = "natcap.invest"
description = "InVEST Ecosystem Service models"
readme = "README_PYTHON.rst"
requires-python = ">=3.8"
requires-python = ">=3.9"
license = {file = "LICENSE.txt"}
maintainers = [
{name = "Natural Capital Project Software Team"}
@ -17,11 +17,11 @@ classifiers = [
"Operating System :: Microsoft",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Cython",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: GIS"

View File

@ -261,11 +261,11 @@ MODEL_SPEC = {
}
},
"index_col": "lucode",
"required": False,
"required": "valuation_table_path",
"about": gettext(
"A table of water demand for each LULC class. Each LULC code "
"in the LULC raster must have a corresponding row in this "
"table."),
"table. Required if 'valuation_table_path' is provided."),
"name": gettext("water demand table")
},
"valuation_table_path": {
@ -512,14 +512,13 @@ def execute(args):
a path to an input CSV
table of LULC classes, showing consumptive water use for each
landuse / land-cover type (cubic meters per year) to calculate
water scarcity.
water scarcity. Required if ``valuation_table_path`` is provided.
args['valuation_table_path'] (string): (optional) if a non-empty
string, a path to an input CSV table of
hydropower stations with the following fields to calculate
valuation: 'ws_id', 'time_span', 'discount', 'efficiency',
'fraction', 'cost', 'height', 'kw_price'
Required if ``calculate_valuation`` is True.
args['n_workers'] (int): (optional) The number of worker processes to
use for processing this model. If omitted, computation will take

View File

@ -651,34 +651,79 @@ def _generate_report(raster_file_set, model_args, file_registry):
with codecs.open(html_report_path, 'w', encoding='utf-8') as report_doc:
# Boilerplate header that defines style and intro header.
header = (
'<!DOCTYPE html><html><head><meta charset="utf-8"><title>Carbon R'
'esults</title><style type="text/css">body { background-color: #E'
'FECCA; color: #002F2F} h1 { text-align: center } h1, h2, h3, h4,'
'strong, th { color: #046380; } h2 { border-bottom: 1px solid #A7'
'A37E; } table { border: 5px solid #A7A37E; margin-bottom: 50px; '
'background-color: #E6E2AF; } td, th { margin-left: 0px; margin-r'
'ight: 0px; padding-left: 8px; padding-right: 8px; padding-bottom'
': 2px; padding-top: 2px; text-align:left; } td { border-top: 5px'
'solid #EFECCA; } .number {text-align: right; font-family: monosp'
'ace;} img { margin: 20px; }</style></head><body><h1>InVEST Carbo'
'n Model Results</h1><p>This document summarizes the results from'
'running the InVEST carbon model with the following data.</p>')
"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Carbon Results</title>
<style type="text/css">
body {
--invest-green: #148f68;
background: #ffffff;
color: #000000;
font-family: Roboto, "Helvetica Neue", Arial, sans-serif;
}
h1, h2, th {
font-weight: bold;
}
h1, h2 {
color: var(--invest-green);
}
h1 {
font-size: 2rem;
}
h2 {
font-size: 1.5rem;
}
table {
border: 0.25rem solid var(--invest-green);
border-collapse: collapse;
}
thead tr {
background: #e9ecef;
border-bottom: 0.1875rem solid var(--invest-green);
}
tbody tr:nth-child(even) {
background: ghostwhite;
}
th {
padding: 0.5rem;
text-align:left;
}
td {
padding: 0.375rem 0.5rem;
}
.number {
text-align: right;
font-family: monospace;
}
</style>
</head>
<body>
<h1>InVEST Carbon Model Results</h1>
<p>This document summarizes the results from
running the InVEST carbon model with the following data.</p>
"""
)
report_doc.write(header)
report_doc.write('<p>Report generated at %s</p>' % (
time.strftime("%Y-%m-%d %H:%M")))
# Report input arguments
report_doc.write('<table><tr><th>arg id</th><th>arg value</th></tr>')
report_doc.write('<h2>Inputs</h2>')
report_doc.write('<table><thead><tr><th>arg id</th><th>arg value</th>'
'</tr></thead><tbody>')
for key, value in model_args.items():
report_doc.write('<tr><td>%s</td><td>%s</td></tr>' % (key, value))
report_doc.write('</table>')
report_doc.write('</tbody></table>')
# Report aggregate results
report_doc.write('<h3>Aggregate Results</h3>')
report_doc.write('<h2>Aggregate Results</h2>')
report_doc.write(
'<table><tr><th>Description</th><th>Value</th><th>Units</th><th>R'
'aw File</th></tr>')
'<table><thead><tr><th>Description</th><th>Value</th><th>Units'
'</th><th>Raw File</th></tr></thead><tbody>')
# value lists are [sort priority, description, statistic, units]
report = [
@ -701,7 +746,7 @@ def _generate_report(raster_file_set, model_args, file_registry):
'<tr><td>%s</td><td class="number">%.2f</td><td>%s</td>'
'<td>%s</td></tr>' % (
description, summary_stat, units, raster_uri))
report_doc.write('</body></html>')
report_doc.write('</tbody></table></body></html>')
@validation.invest_validator

View File

@ -21,48 +21,187 @@ from .unit_registry import u
LOGGER = logging.getLogger(__name__)
CROP_OPTIONS = {
# TODO: use human-readable translatable crop names (#614)
crop: {"description": crop} for crop in [
"abaca", "agave", "alfalfa", "almond", "aniseetc",
"apple", "apricot", "areca", "artichoke", "asparagus",
"avocado", "bambara", "banana", "barley", "bean",
"beetfor", "berrynes", "blueberry", "brazil",
"canaryseed", "carob", "carrot", "carrotfor", "cashew",
"broadbean", "buckwheat", "cabbage", "cabbagefor",
"cashewapple", "cassava", "castor", "cauliflower",
"cerealnes", "cherry", "chestnut", "chickpea",
"chicory", "chilleetc", "cinnamon", "citrusnes",
"clove", "clover", "cocoa", "coconut", "coffee",
"cotton", "cowpea", "cranberry", "cucumberetc",
"currant", "date", "eggplant", "fibrenes", "fig",
"flax", "fonio", "fornes", "fruitnes", "garlic",
"ginger", "gooseberry", "grape", "grapefruitetc",
"grassnes", "greenbean", "greenbroadbean", "greencorn",
"greenonion", "greenpea", "groundnut", "hazelnut",
"hemp", "hempseed", "hop", "jute", "jutelikefiber",
"kapokfiber", "kapokseed", "karite", "kiwi", "kolanut",
"legumenes", "lemonlime", "lentil", "lettuce",
"linseed", "lupin", "maize", "maizefor", "mango",
"mate", "melonetc", "melonseed", "millet",
"mixedgrain", "mixedgrass", "mushroom", "mustard",
"nutmeg", "nutnes", "oats", "oilpalm", "oilseedfor",
"oilseednes", "okra", "olive", "onion", "orange",
"papaya", "pea", "peachetc", "pear", "pepper",
"peppermint", "persimmon", "pigeonpea", "pimento",
"pineapple", "pistachio", "plantain", "plum", "poppy",
"potato", "pulsenes", "pumpkinetc", "pyrethrum",
"quince", "quinoa", "ramie", "rapeseed", "rasberry",
"rice", "rootnes", "rubber", "rye", "ryefor",
"safflower", "sesame", "sisal", "sorghum",
"sorghumfor", "sourcherry, soybean", "spicenes",
"spinach", "stonefruitnes", "strawberry", "stringbean",
"sugarbeet", "sugarcane", "sugarnes", "sunflower",
"swedefor", "sweetpotato", "tangetc", "taro", "tea",
"tobacco", "tomato", "triticale", "tropicalnes",
"tung", "turnipfor", "vanilla", "vegetablenes",
"vegfor", "vetch", "walnut", "watermelon", "wheat",
"yam", "yautia"
]
# Human-readable/translatable crop names come from three sources:
# (1) Monfreda et. al. Table 1
# (2) "EarthStat and FAO crop names and crop groups" table
# (3) FAO's _World Programme for the Census of Agriculture 2020_
# Where (1) and (2) differ, default to (1), except where (2) is
# more descriptive (i.e., include additional list items, alternate
# names, qualifiers, and other disambiguations).
# Where discrepancies remain, consult (3) for additional context.
# See #614 for more details and links to sources.
"abaca": {"description": gettext("Abaca (manila hemp)")},
"agave": {"description": gettext("Agave fibers, other")},
"alfalfa": {"description": gettext("Alfalfa")},
"almond": {"description": gettext("Almonds, with shell")},
"aniseetc": {"description": gettext("Anise, badian, fennel, coriander")},
"apple": {"description": gettext("Apples")},
"apricot": {"description": gettext("Apricots")},
"areca": {"description": gettext("Areca nuts (betel)")},
"artichoke": {"description": gettext("Artichokes")},
"asparagus": {"description": gettext("Asparagus")},
"avocado": {"description": gettext("Avocados")},
"bambara": {"description": gettext("Bambara beans")},
"banana": {"description": gettext("Bananas")},
"barley": {"description": gettext("Barley")},
"bean": {"description": gettext("Beans, dry")},
"beetfor": {"description": gettext("Beets for fodder")},
"berrynes": {"description": gettext("Berries, other")},
"blueberry": {"description": gettext("Blueberries")},
"brazil": {"description": gettext("Brazil nuts, with shell")},
"broadbean": {"description": gettext("Broad beans, horse beans, dry")},
"buckwheat": {"description": gettext("Buckwheat")},
"cabbage": {"description": gettext("Cabbages and other brassicas")},
"cabbagefor": {"description": gettext("Cabbage for fodder")},
"canaryseed": {"description": gettext("Canary seed")},
"carob": {"description": gettext("Carobs")},
"carrot": {"description": gettext("Carrots and turnips")},
"carrotfor": {"description": gettext("Carrots for fodder")},
"cashew": {"description": gettext("Cashew nuts, with shell")},
"cashewapple": {"description": gettext("Cashew apple")},
"cassava": {"description": gettext("Cassava")},
"castor": {"description": gettext("Castor beans")},
"cauliflower": {"description": gettext("Cauliflower and broccoli")},
"cerealnes": {"description": gettext("Cereals, other")},
"cherry": {"description": gettext("Cherries")},
"chestnut": {"description": gettext("Chestnuts")},
"chickpea": {"description": gettext("Chick peas")},
"chicory": {"description": gettext("Chicory roots")},
"chilleetc": {"description": gettext("Chilies and peppers, green")},
"cinnamon": {"description": gettext("Cinnamon (canella)")},
"citrusnes": {"description": gettext("Citrus fruit, other")},
"clove": {"description": gettext("Cloves")},
"clover": {"description": gettext("Clover")},
"cocoa": {"description": gettext("Cocoa beans")},
"coconut": {"description": gettext("Coconuts")},
"coffee": {"description": gettext("Coffee, green")},
"cotton": {"description": gettext("Cotton")},
"cowpea": {"description": gettext("Cow peas, dry")},
"cranberry": {"description": gettext("Cranberries")},
"cucumberetc": {"description": gettext("Cucumbers and gherkins")},
"currant": {"description": gettext("Currants")},
"date": {"description": gettext("Dates")},
"eggplant": {"description": gettext("Eggplants (aubergines)")},
"fibrenes": {"description": gettext("Fiber crops, other")},
"fig": {"description": gettext("Figs")},
"flax": {"description": gettext("Flax fiber and tow")},
"fonio": {"description": gettext("Fonio")},
"fornes": {"description": gettext("Forage products, other")},
"fruitnes": {"description": gettext("Fresh fruit, other")},
"garlic": {"description": gettext("Garlic")},
"ginger": {"description": gettext("Ginger")},
"gooseberry": {"description": gettext("Gooseberries")},
"grape": {"description": gettext("Grapes")},
"grapefruitetc": {"description": gettext("Grapefruit and pomelos")},
"grassnes": {"description": gettext("Grasses, other")},
"greenbean": {"description": gettext("Beans, green")},
"greenbroadbean": {"description": gettext("Broad beans, green")},
"greencorn": {"description": gettext("Green corn (maize)")},
"greenonion": {"description": gettext("Onions and shallots, green")},
"greenpea": {"description": gettext("Peas, green")},
"groundnut": {"description": gettext("Groundnuts, with shell")},
"hazelnut": {"description": gettext("Hazelnuts (filberts), with shell")},
"hemp": {"description": gettext("Hemp fiber and tow")},
"hempseed": {"description": gettext("Hempseed")},
"hop": {"description": gettext("Hops")},
"jute": {"description": gettext("Jute")},
"jutelikefiber": {"description": gettext("Jute-like fibers")},
"kapokfiber": {"description": gettext("Kapok fiber")},
"kapokseed": {"description": gettext("Kapok seed in shell")},
"karite": {"description": gettext("Karite nuts (shea nuts)")},
"kiwi": {"description": gettext("Kiwi fruit")},
"kolanut": {"description": gettext("Kola nuts")},
"legumenes": {"description": gettext("Legumes, other")},
"lemonlime": {"description": gettext("Lemons and limes")},
"lentil": {"description": gettext("Lentils")},
"lettuce": {"description": gettext("Lettuce and chicory")},
"linseed": {"description": gettext("Linseed")},
"lupin": {"description": gettext("Lupins")},
"maize": {"description": gettext("Maize")},
"maizefor": {"description": gettext("Maize for forage and silage")},
"mango": {"description": gettext("Mangoes, mangosteens, guavas")},
"mate": {"description": gettext("Mate")},
"melonetc": {"description": gettext("Cantaloupes and other melons")},
"melonseed": {"description": gettext("Melon seed")},
"millet": {"description": gettext("Millet")},
"mixedgrain": {"description": gettext("Mixed grain")},
"mixedgrass": {"description": gettext("Mixed grasses and legumes")},
"mushroom": {"description": gettext("Mushrooms and truffles")},
"mustard": {"description": gettext("Mustard seed")},
"nutmeg": {"description": gettext("Nutmeg, mace, and cardamoms")},
"nutnes": {"description": gettext("Nuts, other")},
"oats": {"description": gettext("Oats")},
"oilpalm": {"description": gettext("Oil palm fruit")},
"oilseedfor": {"description": gettext("Green oilseeds for fodder")},
"oilseednes": {"description": gettext("Oilseeds, other")},
"okra": {"description": gettext("Okra")},
"olive": {"description": gettext("Olives")},
"onion": {"description": gettext("Onions, dry")},
"orange": {"description": gettext("Oranges")},
"papaya": {"description": gettext("Papayas")},
"pea": {"description": gettext("Peas, dry")},
"peachetc": {"description": gettext("Peaches and nectarines")},
"pear": {"description": gettext("Pears")},
"pepper": {"description": gettext("Pepper (Piper spp.)")},
"peppermint": {"description": gettext("Peppermint")},
"persimmon": {"description": gettext("Persimmons")},
"pigeonpea": {"description": gettext("Pigeon peas")},
"pimento": {"description": gettext("Chilies and peppers, dry")},
"pineapple": {"description": gettext("Pineapples")},
"pistachio": {"description": gettext("Pistachios")},
"plantain": {"description": gettext("Plantains")},
"plum": {"description": gettext("Plums and sloes")},
"poppy": {"description": gettext("Poppy seed")},
"potato": {"description": gettext("Potatoes")},
"pulsenes": {"description": gettext("Pulses, other")},
"pumpkinetc": {"description": gettext("Pumpkins, squash, gourds")},
"pyrethrum": {"description": gettext("Pyrethrum, dried flowers")},
"quince": {"description": gettext("Quinces")},
"quinoa": {"description": gettext("Quinoa")},
"ramie": {"description": gettext("Ramie")},
"rapeseed": {"description": gettext("Rapeseed")},
"rasberry": {"description": gettext("Raspberries")},
"rice": {"description": gettext("Rice")},
"rootnes": {"description": gettext("Roots and tubers, other")},
"rubber": {"description": gettext("Natural rubber")},
"rye": {"description": gettext("Rye")},
"ryefor": {"description": gettext("Rye grass for forage and silage")},
"safflower": {"description": gettext("Safflower seed")},
"sesame": {"description": gettext("Sesame seed")},
"sisal": {"description": gettext("Sisal")},
"sorghum": {"description": gettext("Sorghum")},
"sorghumfor": {"description": gettext("Sorghum for forage and silage")},
"sourcherry": {"description": gettext("Sour cherries")},
"soybean": {"description": gettext("Soybeans")},
"spicenes": {"description": gettext("Spices, other")},
"spinach": {"description": gettext("Spinach")},
"stonefruitnes": {"description": gettext("Stone fruit, other")},
"strawberry": {"description": gettext("Strawberries")},
"stringbean": {"description": gettext("String beans")},
"sugarbeet": {"description": gettext("Sugar beets")},
"sugarcane": {"description": gettext("Sugar cane")},
"sugarnes": {"description": gettext("Sugar crops, other")},
"sunflower": {"description": gettext("Sunflower seed")},
"swedefor": {"description": gettext("Swedes for fodder")},
"sweetpotato": {"description": gettext("Sweet potatoes")},
"tangetc": {"description": gettext("Tangerines, mandarins, clementines")},
"taro": {"description": gettext("Taro")},
"tea": {"description": gettext("Tea")},
"tobacco": {"description": gettext("Tobacco leaves")},
"tomato": {"description": gettext("Tomatoes")},
"triticale": {"description": gettext("Triticale")},
"tropicalnes": {"description": gettext("Fresh tropical fruit, other")},
"tung": {"description": gettext("Tung nuts")},
"turnipfor": {"description": gettext("Turnips for fodder")},
"vanilla": {"description": gettext("Vanilla")},
"vegetablenes": {"description": gettext("Fresh vegetables, other")},
"vegfor": {"description": gettext("Vegetables and roots for fodder")},
"vetch": {"description": gettext("Vetches")},
"walnut": {"description": gettext("Walnuts, with shell")},
"watermelon": {"description": gettext("Watermelons")},
"wheat": {"description": gettext("Wheat")},
"yam": {"description": gettext("Yams")},
"yautia": {"description": gettext("Yautia")},
}
nutrient_units = {

View File

@ -18,15 +18,16 @@ from .unit_registry import u
LOGGER = logging.getLogger(__name__)
CROPS = {
"barley": {"description": gettext("barley")},
"maize": {"description": gettext("maize")},
"oilpalm": {"description": gettext("oil palm")},
"potato": {"description": gettext("potato")},
"rice": {"description": gettext("rice")},
"soybean": {"description": gettext("soybean")},
"sugarbeet": {"description": gettext("sugar beet")},
"sugarcane": {"description": gettext("sugarcane")},
"wheat": {"description": gettext("wheat")}
"barley": {"description": gettext("Barley")},
"maize": {"description": gettext("Maize")},
"oilpalm": {"description": gettext("Oil palm fruit")},
"potato": {"description": gettext("Potatoes")},
"rice": {"description": gettext("Rice")},
"soybean": {"description": gettext("Soybeans")},
"sugarbeet": {"description": gettext("Sugar beets")},
"sugarcane": {"description": gettext("Sugar cane")},
"sunflower": {"description": gettext("Sunflower seed")},
"wheat": {"description": gettext("Wheat")}
}
NUTRIENTS = [

View File

@ -62,7 +62,14 @@ MODEL_SPEC = {
"contents": {
# monthly et0 maps, each file ending in a number 1-12
"[MONTH]": {
**spec_utils.ET0,
"name": gettext("reference evapotranspiration"),
"type": "raster",
"bands": {
1: {
"type": "number",
"units": u.millimeter/u.month
}
},
"about": gettext(
"Twelve files, one for each month. File names must "
"end with the month number (1-12). For example, "
@ -76,7 +83,8 @@ MODEL_SPEC = {
"about": gettext(
"Directory containing maps of reference evapotranspiration "
"for each month. Only .tif files should be in this folder "
"(no .tfw, .xml, etc files)."),
"(no .tfw, .xml, etc files). Required if User-Defined Local "
"Recharge is not selected."),
"name": gettext("ET0 directory")
},
"precip_dir": {
@ -104,7 +112,8 @@ MODEL_SPEC = {
"about": gettext(
"Directory containing maps of monthly precipitation for each "
"month. Only .tif files should be in this folder (no .tfw, "
".xml, etc files)."),
".xml, etc files). Required if User-Defined Local Recharge is "
"not selected."),
"name": gettext("precipitation directory")
},
"dem_raster_path": {

View File

@ -16,6 +16,7 @@ from cython.operator cimport dereference as deref
from cpython.mem cimport PyMem_Malloc, PyMem_Free
from cython.operator cimport dereference as deref
from cython.operator cimport preincrement as inc
from libc.math cimport isnan
from libcpp.list cimport list as clist
from libcpp.set cimport set as cset
from libcpp.pair cimport pair
@ -28,6 +29,8 @@ cdef extern from "time.h" nogil:
time_t time(time_t*)
# Return 1 if x and y are approximately equal, else 0.
# Tolerance formula matches numpy.isclose defaults:
# |x - y| <= atol + rtol * |y| with atol=1e-8, rtol=1e-5.
cdef int is_close(double x, double y):
# Unlike IEEE '==', two NaNs compare as equal here so that
# nodata-vs-nodata comparisons succeed.
if isnan(x) and isnan(y):
return 1
return abs(x-y) <= (1e-8+1e-05*abs(y))
cdef extern from "LRUCache.h":

View File

@ -94,7 +94,7 @@ PRECIP = {
"name": gettext("precipitation")
}
ET0 = {
"name": gettext("evapotranspiration"),
"name": gettext("reference evapotranspiration"),
"type": "raster",
"bands": {
1: {
@ -102,7 +102,7 @@ ET0 = {
"units": u.millimeter
}
},
"about": gettext("Map of evapotranspiration values.")
"about": gettext("Map of reference evapotranspiration values.")
}
SOIL_GROUP = {
"type": "raster",

View File

@ -316,7 +316,10 @@ MODEL_SPEC = {
"units": u.none
},
"mean_t_air": {
"about": "Average temperature value in building.",
"about": (
"Average temperature value in building. Calculated "
"from the mean T_air pixel value under this building "
"geometry."),
"type": "number",
"units": u.degree_Celsius
}

View File

@ -715,14 +715,6 @@ def execute(args):
number_of_turbines = int(args['number_of_turbines'])
# Create a list of the biophysical parameters we are looking for from the
# input csv files
biophysical_params = [
'cut_in_wspd', 'cut_out_wspd', 'rated_wspd', 'hub_height',
'turbine_rated_pwr', 'air_density', 'exponent_power_curve',
'air_density_coefficient', 'loss_parameter'
]
# Read the biophysical turbine parameters into a dictionary
turbine_dict = validation.get_validated_dataframe(
args['turbine_parameters_path'],
@ -774,31 +766,13 @@ def execute(args):
for time_step in range(int(time) + 1):
price_list.append(wind_price * (1 + change_rate)**(time_step))
# Hub Height to use for setting Weibull parameters
hub_height = parameters_dict['hub_height']
LOGGER.debug('hub_height : %s', hub_height)
# Read the wind energy data into a dictionary
LOGGER.info('Reading in Wind Data into a dictionary')
wind_point_df = validation.get_validated_dataframe(
args['wind_data_path'], **MODEL_SPEC['args']['wind_data_path'])
wind_point_df.columns = wind_point_df.columns.str.upper()
# Calculate scale value at new hub height given reference values.
# See equation 3 in users guide
wind_point_df.rename(columns={'LAM': 'REF_LAM'}, inplace=True)
wind_point_df['LAM'] = wind_point_df.apply(
lambda row: row.REF_LAM * (hub_height / row.REF)**_ALPHA, axis=1)
wind_point_df.drop(['REF'], axis=1) # REF is not needed after calculation
wind_data = wind_point_df.to_dict('index') # so keys will be 0, 1, 2, ...
# Compute Wind Density and Harvested Wind Energy, adding the values to the
# points to the dictionary, and pickle the dictionary
# Compute Wind Density and Harvested Wind Energy,
# and pickle the resulting dictionary
wind_data_pickle_path = os.path.join(
inter_dir, 'wind_data%s.pickle' % suffix)
compute_density_harvested_task = task_graph.add_task(
func=_compute_density_harvested_fields,
args=(wind_data, parameters_dict, number_of_turbines,
args=(args['wind_data_path'], parameters_dict, number_of_turbines,
wind_data_pickle_path),
target_path_list=[wind_data_pickle_path],
task_name='compute_density_harvested_fields')
@ -1932,14 +1906,12 @@ def _create_distance_raster(base_raster_path, base_vector_path,
def _compute_density_harvested_fields(
wind_dict, parameters_dict, number_of_turbines,
wind_data_path, parameters_dict, number_of_turbines,
target_pickle_path):
"""Compute the density and harvested energy based on scale and shape keys.
Args:
wind_dict (dict): a dictionary whose values are a dictionary with
keys ``LAM``, ``LATI``, ``K``, ``LONG``, ``REF_LAM``, and ``REF``,
and numbers indicating their corresponding values.
wind_data_path (str): path to wind data input.
parameters_dict (dict): a dictionary where the 'parameter_list'
strings are the keys that have values pulled from bio-parameters
@ -1949,13 +1921,30 @@ def _compute_density_harvested_fields(
for the wind farm.
target_pickle_path (str): a path to the pickle file that has
wind_dict_copy, a modified dictionary with new fields computed
from the existing fields and bio-parameters.
wind_dict_copy, a modified dictionary of wind data with additional
fields computed from the existing fields and bio-parameters.
Returns:
None
"""
# Hub Height to use for setting Weibull parameters
hub_height = parameters_dict['hub_height']
LOGGER.debug('hub_height : %s', hub_height)
# Read the wind energy data into a dictionary
LOGGER.info('Reading in Wind Data into a dictionary')
wind_point_df = validation.get_validated_dataframe(
wind_data_path, **MODEL_SPEC['args']['wind_data_path'])
wind_point_df.columns = wind_point_df.columns.str.upper()
# Calculate scale value at new hub height given reference values.
# See equation 3 in users guide
wind_point_df.rename(columns={'LAM': 'REF_LAM'}, inplace=True)
wind_point_df['LAM'] = wind_point_df.apply(
lambda row: row.REF_LAM * (hub_height / row.REF)**_ALPHA, axis=1)
wind_point_df.drop(['REF'], axis=1) # REF is not needed after calculation
wind_dict = wind_point_df.to_dict('index') # so keys will be 0, 1, 2, ...
wind_dict_copy = wind_dict.copy()
# The rated power is expressed in units of MW but the harvested energy
@ -1973,9 +1962,6 @@ def _compute_density_harvested_fields(
air_density_coef = parameters_dict['air_density_coefficient']
losses = parameters_dict['loss_parameter']
# Hub Height to use for setting Weibull parameters
hub_height = parameters_dict['hub_height']
# Compute the mean air density, given by CKs formulas
mean_air_density = air_density_standard - air_density_coef * hub_height

View File

@ -1,20 +1,53 @@
"""Module for Regression Testing the InVEST Annual Water Yield module."""
import unittest
import tempfile
import shutil
import os
import shutil
import tempfile
import unittest
import numpy
from shapely.geometry import Polygon
import pandas
import numpy
from osgeo import gdal
import pygeoprocessing
from osgeo import gdal, ogr, osr
REGRESSION_DATA = os.path.join(
os.path.dirname(__file__), '..', 'data', 'invest-test-data', 'annual_water_yield')
SAMPLE_DATA = os.path.join(REGRESSION_DATA, 'input')
gdal.UseExceptions()
def make_watershed_vector(path_to_shp):
"""
Generate a watershed results shapefile with two polygons.
Args:
path_to_shp (str): path to store the watershed results vector
Returns:
None
"""
# Two small unit squares in EPSG:4326, one per watershed feature.
shapely_geometry_list = [
Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]),
Polygon([(2, 2), (3, 2), (3, 3), (2, 3), (2, 2)])
]
projection_wkt = osr.GetUserInputAsWKT("EPSG:4326")
vector_format = "ESRI Shapefile"
# Eight real-valued fields are declared, but only four are given
# initial values below -- the remaining fields are presumably
# populated later by the functions under test (TODO confirm).
fields = {"hp_energy": ogr.OFTReal, "hp_val": ogr.OFTReal,
"ws_id": ogr.OFTReal, "rsupply_vl": ogr.OFTReal,
"wyield_mn": ogr.OFTReal, "wyield_vol": ogr.OFTReal,
"consum_mn": ogr.OFTReal, "consum_vol": ogr.OFTReal}
attribute_list = [
{"hp_energy": 1, "hp_val": 1, "ws_id": 0, "rsupply_vl": 2},
{"hp_energy": 11, "hp_val": 3, "ws_id": 1, "rsupply_vl": 52}
]
pygeoprocessing.shapely_geometry_to_vector(shapely_geometry_list,
path_to_shp, projection_wkt,
vector_format, fields,
attribute_list)
class AnnualWaterYieldTests(unittest.TestCase):
"""Regression Tests for Annual Water Yield Model."""
@ -74,7 +107,7 @@ class AnnualWaterYieldTests(unittest.TestCase):
with self.assertRaises(ValueError) as cm:
annual_water_yield.execute(args)
self.assertTrue('veg value must be either 1 or 0' in str(cm.exception))
def test_missing_lulc_value(self):
"""Hydro: catching missing LULC value in Biophysical table."""
from natcap.invest import annual_water_yield
@ -89,7 +122,7 @@ class AnnualWaterYieldTests(unittest.TestCase):
bio_df = bio_df[bio_df['lucode'] != 2]
bio_df.to_csv(bad_biophysical_path)
bio_df = None
args['biophysical_table_path'] = bad_biophysical_path
with self.assertRaises(ValueError) as cm:
@ -97,13 +130,13 @@ class AnnualWaterYieldTests(unittest.TestCase):
self.assertTrue(
"The missing values found in the LULC raster but not the table"
" are: [2]" in str(cm.exception))
def test_missing_lulc_demand_value(self):
"""Hydro: catching missing LULC value in Demand table."""
from natcap.invest import annual_water_yield
args = AnnualWaterYieldTests.generate_base_args(self.workspace_dir)
args['demand_table_path'] = os.path.join(
SAMPLE_DATA, 'water_demand_table.csv')
args['sub_watersheds_path'] = os.path.join(
@ -117,7 +150,7 @@ class AnnualWaterYieldTests(unittest.TestCase):
demand_df = demand_df[demand_df['lucode'] != 2]
demand_df.to_csv(bad_demand_path)
demand_df = None
args['demand_table_path'] = bad_demand_path
with self.assertRaises(ValueError) as cm:
@ -247,7 +280,8 @@ class AnnualWaterYieldTests(unittest.TestCase):
def test_validation(self):
"""Hydro: test failure cases on the validation function."""
from natcap.invest import annual_water_yield, validation
from natcap.invest import annual_water_yield
from natcap.invest import validation
args = AnnualWaterYieldTests.generate_base_args(self.workspace_dir)
@ -367,3 +401,124 @@ class AnnualWaterYieldTests(unittest.TestCase):
self.assertTrue(
'but are not found in the valuation table' in
actual_message, actual_message)
# if the demand table is missing but the valuation table is present,
# make sure we have a validation error.
args_missing_demand_table = args.copy()
args_missing_demand_table['demand_table_path'] = ''
args_missing_demand_table['valuation_table_path'] = (
os.path.join(SAMPLE_DATA, 'hydropower_valuation_table.csv'))
validation_warnings = annual_water_yield.validate(
args_missing_demand_table)
self.assertEqual(len(validation_warnings), 1)
self.assertEqual(
validation_warnings[0],
(['demand_table_path'], 'Input is required but has no value'))
def test_fractp_op(self):
"""Test `fractp_op`"""
from natcap.invest.annual_water_yield import fractp_op
# generate fake data
kc = numpy.array([[1, .1, .1], [.6, .6, .1]])
eto = numpy.array([[1000, 900, 900], [1100, 1005, 1000]])
precip = numpy.array([[100, 1000, 10], [500, 800, 1100]])
root = numpy.array([[99, 300, 400], [5, 500, 800]])
soil = numpy.array([[600, 700, 700], [800, 900, 600]])
pawc = numpy.array([[.11, .11, .12], [.55, .55, .19]])
veg = numpy.array([[1, 1, 0], [0, 1, 0]])
nodata_dict = {'eto': None, 'precip': None, 'depth_root': None,
'pawc': None, 'out_nodata': None}
seasonality_constant = 6
actual_fractp = fractp_op(kc, eto, precip, root, soil, pawc, veg,
nodata_dict, seasonality_constant)
# generated by running fractp_op
expected_fractp = numpy.array([[0.9345682, 0.06896508, 1.],
[1., 0.6487423, 0.09090909]],
dtype=numpy.float32)
numpy.testing.assert_allclose(actual_fractp, expected_fractp,
err_msg="Fractp does not match expected")
def test_compute_watershed_valuation(self):
    """Test `compute_watershed_valuation`, `compute_rsupply_volume`
    and `compute_water_yield_volume`"""
    from natcap.invest import annual_water_yield

    def _create_watershed_results_vector(path_to_shp):
        """Generate a fake watershed results vector file."""
        # Two small non-overlapping square watershed polygons.
        shapely_geometry_list = [
            Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]),
            Polygon([(2, 2), (3, 2), (3, 3), (2, 3), (2, 2)])
        ]
        projection_wkt = osr.GetUserInputAsWKT("EPSG:4326")
        vector_format = "ESRI Shapefile"
        # Fields read/updated by the functions under test.
        fields = {"ws_id": ogr.OFTReal, "wyield_mn": ogr.OFTReal,
                  "consum_mn": ogr.OFTReal, "consum_vol": ogr.OFTReal}
        attribute_list = [{"ws_id": 0, "wyield_mn": 990000,
                           "consum_mn": 500, "consum_vol": 50},
                          {"ws_id": 1, "wyield_mn": 800000,
                           "consum_mn": 600, "consum_vol": 70}]
        pygeoprocessing.shapely_geometry_to_vector(shapely_geometry_list,
                                                   path_to_shp,
                                                   projection_wkt,
                                                   vector_format, fields,
                                                   attribute_list)

    def _validate_fields(vector_path, field_name, expected_values, error_msg):
        """
        Validate a specific field in the watershed results vector
        by comparing actual to expected values. Expected values generated
        by running the function.

        Args:
            vector_path (str): path to watershed shapefile
            field_name (str): attribute field to check
            expected_values (list): list of expected values for field
            error_msg (str): what to print if assertion fails

        Returns:
            None
        """
        # Opened in update mode so a subsequent function under test can
        # keep writing to the same vector after this handle closes.
        with gdal.OpenEx(vector_path, gdal.OF_VECTOR | gdal.GA_Update) as ws_ds:
            ws_layer = ws_ds.GetLayer()
            actual_values = [ws_feat.GetField(field_name)
                             for ws_feat in ws_layer]
            self.assertEqual(actual_values, expected_values, msg=error_msg)

    # generate fake watershed results vector
    watershed_results_vector_path = os.path.join(self.workspace_dir,
                                                 "watershed_results.shp")
    _create_watershed_results_vector(watershed_results_vector_path)
    # generate fake valuation parameter table (val_df), one row per watershed
    val_df = pandas.DataFrame({'efficiency': [.7, .8], 'height': [12, 50],
                               'fraction': [.9, .7], 'discount': [60, 20],
                               'time_span': [10, 10], 'cost': [100, 200],
                               'kw_price': [15, 20]})
    # test water yield volume; expected values generated by a prior run
    annual_water_yield.compute_water_yield_volume(
        watershed_results_vector_path)
    _validate_fields(watershed_results_vector_path, "wyield_vol",
                     [990.0, 800.0],
                     "Error with water yield volume calculation.")
    # test rsupply volume (depends on wyield_vol computed above)
    annual_water_yield.compute_rsupply_volume(
        watershed_results_vector_path)
    _validate_fields(watershed_results_vector_path, "rsupply_vl",
                     [940.0, 730.0],
                     "Error calculating total realized water supply volume.")
    # test compute watershed valuation (depends on rsupply_vl)
    annual_water_yield.compute_watershed_valuation(
        watershed_results_vector_path, val_df)
    _validate_fields(watershed_results_vector_path, "hp_energy",
                     [19.329408, 55.5968],
                     "Error calculating energy.")
    _validate_fields(watershed_results_vector_path, "hp_val",
                     [501.9029748723, 4587.91946857059],
                     "Error calculating net present value.")

View File

@ -268,6 +268,69 @@ class CarbonTests(unittest.TestCase):
assert_raster_equal_value(
os.path.join(args['workspace_dir'], 'npv_redd.tif'), -0.4602106)
def test_generate_carbon_map(self):
    """Test `_generate_carbon_map`"""
    from natcap.invest.carbon import _generate_carbon_map

    def _write_lulc_raster(target_path):
        """Write a 2x2 int32 LULC raster (UTM Zone 10N) to target_path.

        Args:
            target_path (str): where the new raster is written.

        Returns:
            None.
        """
        lulc_array = numpy.array([[1, 1], [2, 3]], dtype=numpy.int32)
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(26910)  # UTM Zone 10N
        pygeoprocessing.numpy_array_to_raster(
            lulc_array, -999, (1, 1), (461251, 4923245),
            srs.ExportToWkt(), target_path)

    # generate a fake lulc raster
    lulc_path = os.path.join(self.workspace_dir, "lulc.tif")
    _write_lulc_raster(lulc_path)
    # fake mapping of LULC code -> carbon pool value
    carbon_pool_by_type = {1: 5000, 2: 60, 3: 120}
    out_carbon_stock_path = os.path.join(
        self.workspace_dir, "carbon_stock.tif")
    _generate_carbon_map(
        lulc_path, carbon_pool_by_type, out_carbon_stock_path)
    # read back the generated carbon stock raster and check its values
    stock_raster = gdal.Open(out_carbon_stock_path)
    stock_array = stock_raster.GetRasterBand(1).ReadAsArray()
    # expected values generated by a prior run of _generate_carbon_map
    expected_stock = numpy.array([[0.5, 0.5], [0.006, 0.012]],
                                 dtype=numpy.float32)
    numpy.testing.assert_array_equal(stock_array, expected_stock)
def test_calculate_valuation_constant(self):
    """Test `_calculate_valuation_constant`"""
    from natcap.invest.carbon import _calculate_valuation_constant
    result = _calculate_valuation_constant(
        lulc_cur_year=2010,
        lulc_fut_year=2012,
        discount_rate=50,
        rate_change=5,
        price_per_metric_ton_of_c=50)
    # expected value generated by a prior trusted run of the function;
    # compare at 5 decimal places to avoid float noise
    self.assertEqual(round(result, 5), 40.87302)
class CarbonValidationTests(unittest.TestCase):
"""Tests for the Carbon Model MODEL_SPEC and validation."""

View File

@ -11,6 +11,7 @@ import unittest
import numpy
import pandas
from scipy.sparse import dok_matrix
import pygeoprocessing
from natcap.invest import utils
from natcap.invest import validation
@ -24,6 +25,26 @@ REGRESSION_DATA = os.path.join(
LOGGER = logging.getLogger(__name__)
def make_raster_from_array(base_raster_path, array):
    """Create a raster on designated path with arbitrary values.

    Args:
        base_raster_path (str): the raster path for making the new raster.
        array (numpy.ndarray): the 2D array written as the raster's band.

    Returns:
        None.
    """
    # UTM Zone 10N
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(26910)
    projection_wkt = srs.ExportToWkt()
    origin = (461261, 4923265)
    pixel_size = (1, 1)
    # -1 marks nodata pixels in the generated raster
    no_data = -1
    pygeoprocessing.numpy_array_to_raster(
        array, no_data, pixel_size, origin, projection_wkt,
        base_raster_path)
class TestPreprocessor(unittest.TestCase):
"""Test Coastal Blue Carbon preprocessor functions."""
@ -1060,3 +1081,149 @@ class TestCBC2(unittest.TestCase):
[(['analysis_year'],
coastal_blue_carbon.INVALID_ANALYSIS_YEAR_MSG.format(
analysis_year=2000, latest_year=2000))])
def test_calculate_npv(self):
    """Test `_calculate_npv`"""
    from natcap.invest.coastal_blue_carbon import coastal_blue_carbon
    # make fake data: one net-sequestration raster per year
    net_sequestration_rasters = {
        2010: os.path.join(self.workspace_dir, "carbon_seq_2010.tif"),
        2011: os.path.join(self.workspace_dir, "carbon_seq_2011.tif"),
        2012: os.path.join(self.workspace_dir, "carbon_seq_2012.tif")
    }
    for year, path in net_sequestration_rasters.items():
        # arbitrary values derived from the year so rasters differ
        array = numpy.array([[year*.5, year*.25], [year-1, 50]])  # random array
        make_raster_from_array(path, array)
    # carbon price per year
    prices_by_year = {
        2010: 50,
        2011: 80,
        2012: 95
    }
    discount_rate = 0.1
    baseline_year = 2010
    # NPV raster paths that _calculate_npv is expected to write
    target_raster_years_and_paths = {
        2010: os.path.join(self.workspace_dir, "tgt_carbon_seq_2010.tif"),
        2011: os.path.join(self.workspace_dir, "tgt_carbon_seq_2011.tif"),
        2012: os.path.join(self.workspace_dir, "tgt_carbon_seq_2012.tif")
    }
    coastal_blue_carbon._calculate_npv(net_sequestration_rasters,
                                       prices_by_year, discount_rate,
                                       baseline_year,
                                       target_raster_years_and_paths)
    # read in the created target rasters
    actual_2011 = gdal.Open(target_raster_years_and_paths[2011])
    band = actual_2011.GetRasterBand(1)
    actual_2011 = band.ReadAsArray()
    actual_2012 = gdal.Open(target_raster_years_and_paths[2012])
    band = actual_2012.GetRasterBand(1)
    actual_2012 = band.ReadAsArray()
    # compare actual rasters to expected (based on running `_calculate_npv`)
    expected_2011 = numpy.array([[100525, 50262.5], [200950, 5000]])
    expected_2012 = numpy.array([[370206.818182, 185103.409091],
                                 [740045.454545, 18409.090909]])
    numpy.testing.assert_allclose(actual_2011, expected_2011)
    numpy.testing.assert_allclose(actual_2012, expected_2012)
def test_calculate_accumulation_over_time(self):
    """Test `_calculate_accumulation_over_time`"""
    from natcap.invest.coastal_blue_carbon.coastal_blue_carbon import \
        _calculate_accumulation_over_time
    # generate fake per-year accumulation matrices; one biomass pixel
    # carries the float32-min nodata value
    nodata = float(numpy.finfo(numpy.float32).min)
    biomass = numpy.array([[1, 2], [3, nodata]])
    soil = numpy.array([[11, 12], [13, 14]])
    litter = numpy.array([[.5, .9], [4, .9]])
    result = _calculate_accumulation_over_time(biomass, soil, litter, 3)
    # expected values generated by a prior run of the function;
    # the nodata pixel propagates to the output
    expected = numpy.array([[37.5, 44.7], [60, nodata]])
    numpy.testing.assert_allclose(result, expected)
def test_calculate_net_sequestration(self):
    """test `_calculate_net_sequestration`"""
    from natcap.invest.coastal_blue_carbon.coastal_blue_carbon import \
        _calculate_net_sequestration
    # build accumulation/emissions rasters whose -1 pixels are nodata
    # (make_raster_from_array writes -1 as the raster nodata value)
    accumulation_path = os.path.join(self.workspace_dir,
                                     "accumulation_raster.tif")
    make_raster_from_array(accumulation_path,
                           numpy.array([[40, -1], [70, -1]]))
    emissions_path = os.path.join(self.workspace_dir,
                                  "emissions_raster.tif")
    make_raster_from_array(emissions_path,
                           numpy.array([[-1, 8], [7, -1]]))
    target_path = os.path.join(self.workspace_dir, "target_raster.tif")
    # run `_calculate_net_sequestration`
    _calculate_net_sequestration(
        accumulation_path, emissions_path, target_path)
    # read back the output net sequestration raster
    out_raster = gdal.Open(target_path)
    sequestration_array = out_raster.GetRasterBand(1).ReadAsArray()
    # reference values generated by a prior run of
    # `_calculate_net_sequestration`
    nodata = float(numpy.finfo(numpy.float32).min)
    expected = numpy.array([[40, -8], [-7, nodata]])
    numpy.testing.assert_allclose(sequestration_array, expected)
def test_reclassify_accumulation_transition(self):
    """Test `_reclassify_accumulation_transition`"""
    # NOTE: the original import also pulled in
    # _reclassify_disturbance_magnitude, which was never used; removed.
    from natcap.invest.coastal_blue_carbon.coastal_blue_carbon import \
        _reclassify_accumulation_transition
    # make fake raster data for the from/to landuse transition rasters
    landuse_transition_from_raster = os.path.join(self.workspace_dir,
                                                  "landuse_transition_from.tif")
    landuse_transition_from_array = numpy.array([[1, 2], [3, 2]])
    make_raster_from_array(landuse_transition_from_raster,
                           landuse_transition_from_array)
    landuse_transition_to_raster = os.path.join(self.workspace_dir,
                                                "landuse_transition_to.tif")
    landuse_transition_to_array = numpy.array([[1, 1], [2, 3]])
    make_raster_from_array(landuse_transition_to_raster,
                           landuse_transition_to_array)
    # make fake accumulation_rate_matrix: sparse matrix keyed by
    # (from_lucode, to_lucode) -> accumulation rate
    accumulation_rate_matrix = dok_matrix((4, 4), dtype=numpy.float32)
    accumulation_rate_matrix[1, 2] = 0.5  # Forest -> Grassland
    accumulation_rate_matrix[1, 3] = 0.3  # Forest -> Agriculture
    accumulation_rate_matrix[2, 1] = 0.2  # Grassland -> Forest
    accumulation_rate_matrix[2, 3] = 0.4  # Grassland -> Agriculture
    accumulation_rate_matrix[3, 1] = 0.1  # Agriculture -> Forest
    accumulation_rate_matrix[3, 2] = 0.3  # Agriculture -> Grassland
    target_raster_path = os.path.join(self.workspace_dir, "output.tif")
    _reclassify_accumulation_transition(
        landuse_transition_from_raster, landuse_transition_to_raster,
        accumulation_rate_matrix, target_raster_path)
    # compare actual and expected target_raster
    actual_accumulation = gdal.Open(target_raster_path)
    band = actual_accumulation.GetRasterBand(1)
    actual_accumulation = band.ReadAsArray()
    # expected values generated by a prior run of
    # _reclassify_accumulation_transition
    expected_accumulation = numpy.array([[0, .2], [.3, .4]])
    numpy.testing.assert_allclose(actual_accumulation, expected_accumulation)

View File

@ -8,6 +8,7 @@ import unittest
import numpy.testing
import pandas.testing
import pandas
import pygeoprocessing
import shapely.wkb
import taskgraph
@ -1553,6 +1554,100 @@ class CoastalVulnerabilityTests(unittest.TestCase):
# Polygon has 4 sides on exterior, 3 on interior, expect 7 lines
self.assertTrue(len(line_list) == 7)
def test_assemble_results_and_calculate_exposure(self):
    """Test that assemble_results_and_calculate_exposure correctly
    calculates exposure"""
    from natcap.invest.coastal_vulnerability import \
        assemble_results_and_calculate_exposure

    def _make_shore_points_vector(shore_points_path):
        # create 4 points, each with a unique 'shore_id' in [0..3].
        shore_geometries = [Point(0, 0), Point(1, 0), Point(2, 1), Point(3, 2)]
        shore_fields = {'shore_id': ogr.OFTInteger}
        shore_attributes = [{'shore_id': i} for i in range(len(shore_geometries))]
        # Create a spatial reference (projected or geographic)
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(26910)  # e.g. "NAD83 / UTM zone 10N"
        pygeoprocessing.shapely_geometry_to_vector(
            shore_geometries, shore_points_path, srs.ExportToWkt(),
            vector_format='GPKG',
            fields=shore_fields,
            attribute_list=shore_attributes,
            ogr_geom_type=ogr.wkbPoint
        )

    def _make_habitat_csv(habitat_csv_path):
        # Example: one habitat column named 'kelp', plus 'R_hab'
        # We have 4 shore IDs, so we add 4 rows. Values are arbitrary.
        habitat_df = pandas.DataFrame(
            {'shore_id': [0, 1, 2, 3], 'kelp': [5, 3, 5, 4],
             'seagrass': [4, 1, 2, 4], 'R_hab': [5, 2, 5, 3]})
        habitat_df.to_csv(habitat_csv_path, index=False)

    def _make_risk_id_path_list():
        # Create pickles for risk data, each mapping shore_id -> value
        relief_pkl = os.path.join(self.workspace_dir, 'relief.pickle')
        slr_pkl = os.path.join(self.workspace_dir, 'slr.pickle')
        population_pkl = os.path.join(self.workspace_dir, 'population.pickle')
        relief_data = {0: 10.0, 1: 50.0, 2: 30.0, 3: 80.0}  # arbitrary data
        slr_data = {0: 0.1, 1: 0.2, 2: 0.9, 3: 0.5}
        population_data = {0: 123.0, 1: 999.0, 2: 55.0, 3: 0.0}
        for file_path, data_dict in zip([relief_pkl, slr_pkl, population_pkl],
                                        [relief_data, slr_data, population_data]):
            with open(file_path, 'wb') as f:
                pickle.dump(data_dict, f)
        # each entry: (pickle path, bin-to-rank flag, output field name)
        risk_id_path_list = [
            (relief_pkl, True, "R_relief"),  # "True" => bin to 1..5
            (slr_pkl, True, "R_slr"),
            (population_pkl, False, "population")
        ]
        return risk_id_path_list

    # build the fixture inputs
    shore_points_path = os.path.join(self.workspace_dir, "shore_points.gpkg")
    _make_shore_points_vector(shore_points_path)
    habitat_csv_path = os.path.join(self.workspace_dir, 'habitat_protection.csv')
    _make_habitat_csv(habitat_csv_path)
    risk_id_path_list = _make_risk_id_path_list()
    intermediate_vector_path = os.path.join(self.workspace_dir,
                                            'intermediate_exposure.gpkg')
    intermediate_csv_path = os.path.join(self.workspace_dir,
                                         'intermediate_exposure.csv')
    output_vector_path = os.path.join(self.workspace_dir,
                                      'coastal_exposure.gpkg')
    output_csv_path = os.path.join(self.workspace_dir,
                                   'coastal_exposure.csv')
    # call function
    assemble_results_and_calculate_exposure(
        risk_id_path_list,
        habitat_csv_path,
        shore_points_path,
        intermediate_vector_path,
        intermediate_csv_path,
        output_vector_path,
        output_csv_path
    )
    # read field values in output vector and compare; expected values
    # generated by a prior run of the function
    actual_df = pandas.read_csv(
        output_csv_path,
        usecols=["exposure", "habitat_role", "exposure_no_habitats"])
    expected_df = pandas.DataFrame({
        "exposure": [2.924018, 2.0, 4.641589, 2.289428],
        "habitat_role": [0, 0.714418, 0, 0.424989],
        "exposure_no_habitats": [2.924018, 2.714418, 4.641589, 2.714418]})
    pandas.testing.assert_frame_equal(
        actual_df, expected_df, check_dtype=False)
def assert_pickled_arrays_almost_equal(
actual_values_pickle_path, expected_values_json_path):

View File

@ -5,9 +5,10 @@ import shutil
import os
import numpy
from osgeo import gdal
from osgeo import gdal, ogr, osr
import pandas
import pygeoprocessing
from shapely.geometry import Polygon
gdal.UseExceptions()
MODEL_DATA_PATH = os.path.join(
@ -21,6 +22,108 @@ TEST_DATA_PATH = os.path.join(
'crop_production_model')
def make_aggregate_vector(path_to_shp):
    """Generate a shapefile containing two overlapping polygons.

    Args:
        path_to_shp (str): path to store watershed results vector

    Outputs:
        None
    """
    # Corner order: (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)
    base_x, base_y = 461151, 4923265
    anchor_x = 461261
    polygons = [
        Polygon([(base_x, base_y - 50), (anchor_x + 50, base_y - 50),
                 (anchor_x + 50, base_y), (base_x, base_y)]),
        Polygon([(anchor_x, base_y - 35), (anchor_x + 60, base_y - 35),
                 (anchor_x + 60, base_y + 50), (anchor_x, base_y + 50)]),
    ]
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(26910)  # NAD83 / UTM zone 10N
    attribute_list = [{"id": 0}, {"id": 1}]
    pygeoprocessing.shapely_geometry_to_vector(
        polygons, path_to_shp, srs.ExportToWkt(), "ESRI Shapefile",
        {"id": ogr.OFTReal}, attribute_list)
def make_simple_raster(base_raster_path, array):
    """Create a raster on designated path with arbitrary values.

    Args:
        base_raster_path (str): the raster path for making the new raster.
        array (numpy.ndarray): the 2D array written as the raster's band.

    Returns:
        None.
    """
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(26910)  # UTM Zone 10N
    # 30x30 m pixels; -1 marks nodata
    pygeoprocessing.numpy_array_to_raster(
        array, -1, (30, 30), (461251, 4923245),
        spatial_ref.ExportToWkt(), base_raster_path)
def create_nutrient_df():
    """Creates a nutrient DataFrame for testing.

    Returns:
        pandas.DataFrame indexed by 'crop' with one row each for 'corn'
        and 'soybean'; columns are area, observed production, percent
        refuse, and per-nutrient content values used by the crop
        production tests.
    """
    return pandas.DataFrame([
        {'crop': 'corn', 'area (ha)': 21.0, 'production_observed': 0.2,
         'percentrefuse': 7, 'protein': 42., 'lipid': 8, 'energy': 476.,
         'ca': 27.0, 'fe': 15.7, 'mg': 280.0, 'ph': 704.0, 'k': 1727.0,
         'na': 2.0, 'zn': 4.9, 'cu': 1.9, 'fl': 8, 'mn': 2.9, 'se': 0.1,
         'vita': 3.0, 'betac': 16.0, 'alphac': 2.30, 'vite': 0.8,
         'crypto': 1.6, 'lycopene': 0.36, 'lutein': 63.0, 'betat': 0.5,
         'gammat': 2.1, 'deltat': 1.9, 'vitc': 6.8, 'thiamin': 0.4,
         'riboflavin': 1.8, 'niacin': 8.2, 'pantothenic': 0.9,
         'vitb6': 1.4, 'folate': 385.0, 'vitb12': 2.0, 'vitk': 41.0},
        {'crop': 'soybean', 'area (ha)': 5., 'production_observed': 4.,
         'percentrefuse': 9, 'protein': 33., 'lipid': 2., 'energy': 99.,
         'ca': 257., 'fe': 15.7, 'mg': 280., 'ph': 704.0, 'k': 197.0,
         'na': 2., 'zn': 4.9, 'cu': 1.6, 'fl': 3., 'mn': 5.2, 'se': 0.3,
         'vita': 3.0, 'betac': 16.0, 'alphac': 1.0, 'vite': 0.8,
         'crypto': 0.6, 'lycopene': 0.3, 'lutein': 61.0, 'betat': 0.5,
         'gammat': 2.3, 'deltat': 1.2, 'vitc': 3.0, 'thiamin': 0.42,
         'riboflavin': 0.82, 'niacin': 12.2, 'pantothenic': 0.92,
         'vitb6': 5.4, 'folate': 305., 'vitb12': 3., 'vitk': 42.},
    ]).set_index('crop')
def _create_crop_rasters(output_dir, crop_names, file_suffix):
    """Creates raster files for test setup.

    Writes one observed-production and one regression-production raster
    per crop into output_dir, named to match the model's file patterns.
    """
    observed_pattern = os.path.join('.', '%s_observed_production%s.tif')
    modeled_pattern = os.path.join('.', '%s_regression_production%s.tif')
    for index, crop in enumerate(crop_names):
        observed_path = os.path.join(
            output_dir, observed_pattern % (crop, file_suffix))
        modeled_path = os.path.join(
            output_dir, modeled_pattern % (crop, file_suffix))
        # Arbitrary but deterministic per-crop raster values.
        observed_values = numpy.array(
            [[4, index], [index * 3, 4]], dtype=numpy.int16)
        modeled_values = numpy.array(
            [[index, 1], [index * 2, 3]], dtype=numpy.int16)
        make_simple_raster(observed_path, observed_values)
        make_simple_raster(modeled_path, modeled_values)
class CropProductionTests(unittest.TestCase):
"""Tests for the Crop Production model."""
@ -390,6 +493,304 @@ class CropProductionTests(unittest.TestCase):
pandas.testing.assert_frame_equal(
expected_result_table, result_table, check_dtype=False)
def test_x_yield_op(self):
    """Test `_x_yield_op"""
    from natcap.invest.crop_production_regression import _x_yield_op
    # synthetic inputs; -1 entries act as nodata markers
    y_max = numpy.array([[-1, 3, 2], [4, 5, 3]])
    b_x = numpy.array([[4, 3, 2], [2, 0, 3]])
    c_x = numpy.array([[4, 1, 2], [3, 0, 3]])
    lulc_array = numpy.array([[3, 3, 2], [3, -1, 3]])
    actual_result = _x_yield_op(
        y_max, b_x, c_x, lulc_array,
        0.6,  # fert_rate
        3,    # crop_lucode
        10)   # pixel_area_ha
    # reference output generated by a prior run of _x_yield_op
    expected_result = numpy.array([[-1, -19.393047, -1],
                                   [26.776089, -1, 15.1231]])
    numpy.testing.assert_allclose(actual_result, expected_result)
def test_zero_observed_yield_op(self):
    """Test `_zero_observed_yield_op`"""
    from natcap.invest.crop_production_regression import \
        _zero_observed_yield_op
    # -1 is the observed-yield nodata value
    yield_array = numpy.array([[0, 1, -1], [5, 6, -1]])
    actual_result = _zero_observed_yield_op(yield_array, -1)
    # nodata pixels are expected to come back as zero
    expected_result = numpy.array([[0, 1, 0], [5, 6, 0]])
    numpy.testing.assert_allclose(actual_result, expected_result)
def test_mask_observed_yield_op(self):
    """Test `_mask_observed_yield_op`"""
    from natcap.invest.crop_production_regression import \
        _mask_observed_yield_op
    # synthetic LULC and observed-yield arrays
    lulc_array = numpy.array([[3, 5, -9999], [3, 3, -1]])
    observed_yield_array = numpy.array([[-1, 5, 4], [8, -9999, 91]])
    # note: the observed_yield_nodata value (-1) becomes the nodata value
    # of the output array, but matching values inside
    # observed_yield_array are NOT treated as nodata by this function
    actual_result = _mask_observed_yield_op(
        lulc_array, observed_yield_array,
        -1,     # observed_yield_nodata
        -9999,  # landcover_nodata
        3,      # crop_lucode
        10)     # pixel_area_ha
    # reference output generated by a prior run of _mask_observed_yield_op
    expected_result = numpy.array([[-10, 0, -1], [80, -99990, 0]])
    numpy.testing.assert_allclose(actual_result, expected_result)
def test_tabulate_regression_results(self):
    """Test `tabulate_regression_results`"""
    from natcap.invest.crop_production_regression import \
        tabulate_regression_results

    def _create_expected_results():
        """Creates the expected results DataFrame.

        Values were generated by a prior trusted run of
        tabulate_regression_results on the fixture data below.
        """
        return pandas.DataFrame([
            {'crop': 'corn', 'area (ha)': 20.0,
             'production_observed': 8.0, 'production_modeled': 4.0,
             'protein_modeled': 1562400.0, 'protein_observed': 3124800.0,
             'lipid_modeled': 297600.0, 'lipid_observed': 595200.0,
             'energy_modeled': 17707200.0, 'energy_observed': 35414400.0,
             'ca_modeled': 1004400.0, 'ca_observed': 2008800.0,
             'fe_modeled': 584040.0, 'fe_observed': 1168080.0,
             'mg_modeled': 10416000.0, 'mg_observed': 20832000.0,
             'ph_modeled': 26188800.0, 'ph_observed': 52377600.0,
             'k_modeled': 64244400.0, 'k_observed': 128488800.0,
             'na_modeled': 74400.0, 'na_observed': 148800.0,
             'zn_modeled': 182280.0, 'zn_observed': 364560.0,
             'cu_modeled': 70680.0, 'cu_observed': 141360.0,
             'fl_modeled': 297600.0, 'fl_observed': 595200.0,
             'mn_modeled': 107880.0, 'mn_observed': 215760.0,
             'se_modeled': 3720.0, 'se_observed': 7440.0,
             'vita_modeled': 111600.0, 'vita_observed': 223200.0,
             'betac_modeled': 595200.0, 'betac_observed': 1190400.0,
             'alphac_modeled': 85560.0, 'alphac_observed': 171120.0,
             'vite_modeled': 29760.0, 'vite_observed': 59520.0,
             'crypto_modeled': 59520.0, 'crypto_observed': 119040.0,
             'lycopene_modeled': 13392.0, 'lycopene_observed': 26784.0,
             'lutein_modeled': 2343600.0, 'lutein_observed': 4687200.0,
             'betat_modeled': 18600.0, 'betat_observed': 37200.0,
             'gammat_modeled': 78120.0, 'gammat_observed': 156240.0,
             'deltat_modeled': 70680.0, 'deltat_observed': 141360.0,
             'vitc_modeled': 252960.0, 'vitc_observed': 505920.0,
             'thiamin_modeled': 14880.0, 'thiamin_observed': 29760.0,
             'riboflavin_modeled': 66960.0, 'riboflavin_observed': 133920.0,
             'niacin_modeled': 305040.0, 'niacin_observed': 610080.0,
             'pantothenic_modeled': 33480.0, 'pantothenic_observed': 66960.0,
             'vitb6_modeled': 52080.0, 'vitb6_observed': 104160.0,
             'folate_modeled': 14322000.0, 'folate_observed': 28644000.0,
             'vitb12_modeled': 74400.0, 'vitb12_observed': 148800.0,
             'vitk_modeled': 1525200.0, 'vitk_observed': 3050400.0},
            {'crop': 'soybean', 'area (ha)': 40.0,
             'production_observed': 12.0, 'production_modeled': 7.0,
             'protein_modeled': 2102100.0, 'protein_observed': 3603600.0,
             'lipid_modeled': 127400.0, 'lipid_observed': 218400.0,
             'energy_modeled': 6306300.0, 'energy_observed': 10810800.0,
             'ca_modeled': 16370900.0, 'ca_observed': 28064400.0,
             'fe_modeled': 1000090.0, 'fe_observed': 1714440.0,
             'mg_modeled': 17836000.0, 'mg_observed': 30576000.0,
             'ph_modeled': 44844800.0, 'ph_observed': 76876800.0,
             'k_modeled': 12548900.0, 'k_observed': 21512400.0,
             'na_modeled': 127400.0, 'na_observed': 218400.0,
             'zn_modeled': 312130.0, 'zn_observed': 535080.0,
             'cu_modeled': 101920.0, 'cu_observed': 174720.0,
             'fl_modeled': 191100.0, 'fl_observed': 327600.0,
             'mn_modeled': 331240.0, 'mn_observed': 567840.0,
             'se_modeled': 19110.0, 'se_observed': 32760.0,
             'vita_modeled': 191100.0, 'vita_observed': 327600.0,
             'betac_modeled': 1019200.0, 'betac_observed': 1747200.0,
             'alphac_modeled': 63700.0, 'alphac_observed': 109200.0,
             'vite_modeled': 50960.0, 'vite_observed': 87360.0,
             'crypto_modeled': 38220.0, 'crypto_observed': 65520.0,
             'lycopene_modeled': 19110.0, 'lycopene_observed': 32760.0,
             'lutein_modeled': 3885700.0, 'lutein_observed': 6661200.0,
             'betat_modeled': 31850.0, 'betat_observed': 54600.0,
             'gammat_modeled': 146510.0, 'gammat_observed': 251160.0,
             'deltat_modeled': 76440.0, 'deltat_observed': 131040.0,
             'vitc_modeled': 191100.0, 'vitc_observed': 327600.0,
             'thiamin_modeled': 26754.0, 'thiamin_observed': 45864.0,
             'riboflavin_modeled': 52234.0, 'riboflavin_observed': 89544.0,
             'niacin_modeled': 777140.0, 'niacin_observed': 1332240.0,
             'pantothenic_modeled': 58604.0, 'pantothenic_observed': 100464.0,
             'vitb6_modeled': 343980.0, 'vitb6_observed': 589680.0,
             'folate_modeled': 19428500.0, 'folate_observed': 33306000.0,
             'vitb12_modeled': 191100.0, 'vitb12_observed': 327600.0,
             'vitk_modeled': 2675400.0, 'vitk_observed': 4586400.0}])

    # build fixture inputs
    nutrient_df = create_nutrient_df()
    pixel_area_ha = 10
    workspace_dir = self.workspace_dir
    output_dir = os.path.join(workspace_dir, "OUTPUT")
    os.makedirs(output_dir, exist_ok=True)
    landcover_raster_path = os.path.join(workspace_dir, "landcover.tif")
    landcover_nodata = -1
    make_simple_raster(landcover_raster_path,
                       numpy.array([[2, 1], [2, 3]], dtype=numpy.int16))
    file_suffix = "v1"
    target_table_path = os.path.join(workspace_dir, "output_table.csv")
    crop_names = ["corn", "soybean"]
    _create_crop_rasters(output_dir, crop_names, file_suffix)
    tabulate_regression_results(
        nutrient_df, crop_names, pixel_area_ha,
        landcover_raster_path, landcover_nodata,
        output_dir, file_suffix, target_table_path
    )
    # Read only the first 2 crop's data (skipping total area)
    actual_result_table = pandas.read_csv(target_table_path, nrows=2,
                                          header=0)
    expected_result_table = _create_expected_results()
    # Compare expected vs actual
    pandas.testing.assert_frame_equal(actual_result_table,
                                      expected_result_table)
def test_aggregate_regression_results_to_polygons(self):
    """Test `aggregate_regression_results_to_polygons`"""
    from natcap.invest.crop_production_regression import \
        aggregate_regression_results_to_polygons

    def _create_expected_agg_table():
        """Create expected output results.

        Values were generated by a prior trusted run of
        aggregate_regression_results_to_polygons on the fixture data.
        """
        return pandas.DataFrame([
            {"FID": 0, "corn_modeled": 1, "corn_observed": 4,
             "soybean_modeled": 2, "soybean_observed": 5,
             "protein_modeled": 991200, "protein_observed": 3063900,
             "lipid_modeled": 110800, "lipid_observed": 388600,
             "energy_modeled": 6228600, "energy_observed": 22211700,
             "ca_modeled": 4928500, "ca_observed": 12697900,
             "fe_modeled": 431750, "fe_observed": 1298390,
             "mg_modeled": 7700000, "mg_observed": 23156000,
             "ph_modeled": 19360000, "ph_observed": 58220800,
             "k_modeled": 19646500, "k_observed": 73207900,
             "na_modeled": 55000, "na_observed": 165400,
             "zn_modeled": 134750, "zn_observed": 405230,
             "cu_modeled": 46790, "cu_observed": 143480,
             "fl_modeled": 129000, "fl_observed": 434100,
             "mn_modeled": 121610, "mn_observed": 344480,
             "se_modeled": 6390, "se_observed": 17370,
             "vita_modeled": 82500, "vita_observed": 248100,
             "betac_modeled": 440000, "betac_observed": 1323200,
             "alphac_modeled": 39590, "alphac_observed": 131060,
             "vite_modeled": 22000, "vite_observed": 66160,
             "crypto_modeled": 25800, "crypto_observed": 86820,
             "lycopene_modeled": 8808, "lycopene_observed": 27042,
             "lutein_modeled": 1696100, "lutein_observed": 5119100,
             "betat_modeled": 13750, "betat_observed": 41350,
             "gammat_modeled": 61390, "gammat_observed": 182770,
             "deltat_modeled": 39510, "deltat_observed": 125280,
             "vitc_modeled": 117840, "vitc_observed": 389460,
             "thiamin_modeled": 11364, "thiamin_observed": 33990,
             "riboflavin_modeled": 31664, "riboflavin_observed": 104270,
             "niacin_modeled": 298300, "niacin_observed": 860140,
             "pantothenic_modeled": 25114, "pantothenic_observed": 75340,
             "vitb6_modeled": 111300, "vitb6_observed": 297780,
             "folate_modeled": 9131500, "folate_observed": 28199500,
             "vitb12_modeled": 73200, "vitb12_observed": 210900,
             "vitk_modeled": 1145700, "vitk_observed": 3436200},
            {"FID": 1, "corn_modeled": 4, "corn_observed": 8,
             "soybean_modeled": 7, "soybean_observed": 12,
             "protein_modeled": 3664500, "protein_observed": 6728400,
             "lipid_modeled": 425000, "lipid_observed": 813600,
             "energy_modeled": 24013500, "energy_observed": 46225200,
             "ca_modeled": 17375300, "ca_observed": 30073200,
             "fe_modeled": 1584130, "fe_observed": 2882520,
             "mg_modeled": 28252000, "mg_observed": 51408000,
             "ph_modeled": 71033600, "ph_observed": 129254400,
             "k_modeled": 76793300, "k_observed": 150001200,
             "na_modeled": 201800, "na_observed": 367200,
             "zn_modeled": 494410, "zn_observed": 899640,
             "cu_modeled": 172600, "cu_observed": 316080,
             "fl_modeled": 488700, "fl_observed": 922800,
             "mn_modeled": 439120, "mn_observed": 783600,
             "se_modeled": 22830, "se_observed": 40200,
             "vita_modeled": 302700, "vita_observed": 550800,
             "betac_modeled": 1614400, "betac_observed": 2937600,
             "alphac_modeled": 149260, "alphac_observed": 280320,
             "vite_modeled": 80720, "vite_observed": 146880,
             "crypto_modeled": 97740, "crypto_observed": 184560,
             "lycopene_modeled": 32502, "lycopene_observed": 59544,
             "lutein_modeled": 6229300, "lutein_observed": 11348400,
             "betat_modeled": 50450, "betat_observed": 91800,
             "gammat_modeled": 224630, "gammat_observed": 407400,
             "deltat_modeled": 147120, "deltat_observed": 272400,
             "vitc_modeled": 444060, "vitc_observed": 833520,
             "thiamin_modeled": 41634, "thiamin_observed": 75624,
             "riboflavin_modeled": 119194, "riboflavin_observed": 223464,
             "niacin_modeled": 1082180, "niacin_observed": 1942320,
             "pantothenic_modeled": 92084, "pantothenic_observed": 167424,
             "vitb6_modeled": 396060, "vitb6_observed": 693840,
             "folate_modeled": 33750500, "folate_observed": 61950000,
             "vitb12_modeled": 265500, "vitb12_observed": 476400,
             "vitk_modeled": 4200600, "vitk_observed": 7636800}
        ], dtype=float)

    workspace = self.workspace_dir
    base_aggregate_vector_path = os.path.join(workspace,
                                              "agg_vector.shp")
    make_aggregate_vector(base_aggregate_vector_path)
    target_aggregate_vector_path = os.path.join(workspace,
                                                "agg_vector_prj.shp")
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(26910)  # NAD83 / UTM zone 10N
    landcover_raster_projection = spatial_ref.ExportToWkt()
    crop_names = ['corn', 'soybean']
    nutrient_df = create_nutrient_df()
    output_dir = os.path.join(workspace, "OUTPUT")
    os.makedirs(output_dir, exist_ok=True)
    file_suffix = 'test'
    target_aggregate_table_path = ''  # unused by the function under test
    _create_crop_rasters(output_dir, crop_names, file_suffix)
    aggregate_regression_results_to_polygons(
        base_aggregate_vector_path, target_aggregate_vector_path,
        landcover_raster_projection, crop_names,
        nutrient_df, output_dir, file_suffix,
        target_aggregate_table_path)
    # the function writes its aggregate table into output_dir using
    # this file pattern
    _AGGREGATE_TABLE_FILE_PATTERN = os.path.join(
        '.', 'aggregate_results%s.csv')
    aggregate_table_path = os.path.join(
        output_dir, _AGGREGATE_TABLE_FILE_PATTERN % file_suffix)
    actual_aggregate_table = pandas.read_csv(aggregate_table_path,
                                             dtype=float)
    # NOTE: removed a leftover debug print of the actual table and
    # corrected the misleading "EPSG:4326 for WGS84" comment (the code
    # imports EPSG:26910, NAD83 / UTM zone 10N).
    expected_aggregate_table = _create_expected_agg_table()
    pandas.testing.assert_frame_equal(
        actual_aggregate_table, expected_aggregate_table)
class CropValidationTests(unittest.TestCase):
"""Tests for the Crop Productions' MODEL_SPEC and validation."""

View File

@ -6,6 +6,7 @@ import unittest
import pytest
import numpy
import pandas
import pygeoprocessing
from osgeo import gdal
from osgeo import ogr
@ -865,7 +866,6 @@ class SeasonalWaterYieldRegressionTests(unittest.TestCase):
when a climate zone raster value is not present in the climate
zone table.
"""
import pandas
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# use predefined directory so test can clean up files during teardown
@ -1218,23 +1218,23 @@ class SeasonalWaterYieldRegressionTests(unittest.TestCase):
# set up tiny raster arrays to test
precip_array = numpy.array([
[10, 10],
[10, 10]], dtype=numpy.float32)
[10, 1, 5],
[100, 15, 70]], dtype=numpy.float32)
et0_array = numpy.array([
[100, 100],
[200, 200]], dtype=numpy.float32)
[5, 100, 1],
[200, 20, 100]], dtype=numpy.float32)
quickflow_array = numpy.array([
[0, 0],
[0.61, 0.61]], dtype=numpy.float32)
[0, 1, 0],
[0.61, 0.61, 1]], dtype=numpy.float32)
flow_dir_array = numpy.array([
[15, 25],
[50, 50]], dtype=numpy.float32)
[15, 25, 25],
[50, 50, 10]], dtype=numpy.float32)
kc_array = numpy.array([
[1, 1],
[1, 1]], dtype=numpy.float32)
[1, .75, 1],
[1, .4, 0]], dtype=numpy.float32)
stream_mask = numpy.array([
[0, 0],
[0, 0]], dtype=numpy.float32)
[0, 0, 0],
[0, 0, 0]], dtype=numpy.float32)
precip_path = os.path.join(self.workspace_dir, 'precip.tif')
et0_path = os.path.join(self.workspace_dir, 'et0.tif')
@ -1246,7 +1246,6 @@ class SeasonalWaterYieldRegressionTests(unittest.TestCase):
srs = osr.SpatialReference()
srs.ImportFromEPSG(26910) # UTM Zone 10N
project_wkt = srs.ExportToWkt()
output_path = os.path.join(self.workspace_dir, 'quickflow.tif')
# write all the test arrays to raster files
for array, path in [(precip_array, precip_path),
@ -1258,23 +1257,161 @@ class SeasonalWaterYieldRegressionTests(unittest.TestCase):
(flow_dir_array, flow_dir_path),
(kc_array, kc_path),
(stream_mask, stream_path)]:
# define a nodata value for intermediate outputs
pygeoprocessing.numpy_array_to_raster(
array, -1, (1, -1), (1180000, 690000), project_wkt, path)
array, -999, (1, -1), (1180000, 690000), project_wkt, path)
# arbitrary values for alpha, beta, gamma
alpha = .6
beta = .4
gamma = .5
alpha_month_map = {i: alpha for i in range(1, 13)}
target_li_path = os.path.join(self.workspace_dir, 'target_li_path.tif')
target_li_avail_path = os.path.join(self.workspace_dir,
'target_li_avail_path.tif')
target_l_sum_avail_path = os.path.join(self.workspace_dir,
'target_l_sum_avail_path.tif')
target_aet_path = os.path.join(self.workspace_dir,
'target_aet_path.tif')
# arbitrary values for alpha, beta, gamma, etc.
# not verifying the output, just making sure there are no errors
seasonal_water_yield_core.calculate_local_recharge(
[precip_path for i in range(12)], [et0_path for i in range(12)],
[quickflow_path for i in range(12)], flow_dir_path,
[kc_path for i in range(12)], {i: 0.5 for i in range(12)}, 0.5,
0.5, stream_path,
os.path.join(self.workspace_dir, 'target_li_path.tif'),
os.path.join(self.workspace_dir, 'target_li_avail_path.tif'),
os.path.join(self.workspace_dir, 'target_l_sum_avail_path.tif'),
os.path.join(self.workspace_dir, 'target_aet_path.tif'),
[kc_path for i in range(12)], alpha_month_map, beta,
gamma, stream_path, target_li_path, target_li_avail_path,
target_l_sum_avail_path, target_aet_path,
os.path.join(self.workspace_dir, 'target_precip_path.tif'))
actual_li = pygeoprocessing.raster_to_numpy_array(target_li_path)
actual_li_avail = pygeoprocessing.raster_to_numpy_array(target_li_avail_path)
actual_l_sum_avail = pygeoprocessing.raster_to_numpy_array(target_l_sum_avail_path)
actual_aet = pygeoprocessing.raster_to_numpy_array(target_aet_path)
# note: obtained these arrays by running `calculate_local_recharge`
expected_li = numpy.array([[60., -72., 73.915215],
[0, 76.68, 828.]])
expected_li_avail = numpy.array([[30., -72., 36.957607],
[0, 38.34, 414.]])
expected_l_sum_avail = numpy.array([[0, 25., -25.665003],
[0, 0, 38.34]])
expected_aet = numpy.array([[60., 72., -13.915211],
[1192.68, 96., 0.]])
# assert li is same as expected li from function
numpy.testing.assert_allclose(actual_li, expected_li, equal_nan=True,
err_msg="li raster values do not match.")
numpy.testing.assert_allclose(actual_li_avail, expected_li_avail,
equal_nan=True,
err_msg="li_avail raster values do not match.")
numpy.testing.assert_allclose(actual_l_sum_avail, expected_l_sum_avail,
equal_nan=True,
err_msg="l_sum_avail raster values do not match.")
numpy.testing.assert_allclose(actual_aet, expected_aet, equal_nan=True,
err_msg="aet raster values do not match.")
def test_route_baseflow_sum(self):
"""Test `route_baseflow_sum`"""
from natcap.invest.seasonal_water_yield import \
seasonal_water_yield_core
# set up tiny raster arrays to test
flow_dir_mfd = numpy.array([
[1409286196, 1409286196, 1677721604],
[1678770180, 838861365, 1677721604]], dtype=numpy.int32)
l = numpy.array([
[18, 15, 12.5],
[2, 17, 8]], dtype=numpy.float32)
l_avail = numpy.array([
[15.6, 12, 11],
[1, 15, 6]], dtype=numpy.float32)
l_sum = numpy.array([
[29, 28, 19],
[2, 19, 99]], dtype=numpy.float32)
stream_mask = numpy.array([
[0, 1, 0],
[0, 0, 0]], dtype=numpy.int8)
flow_dir_mfd_path = os.path.join(self.workspace_dir, 'flow_dir_mfd.tif')
l_path = os.path.join(self.workspace_dir, 'l.tif')
l_avail_path = os.path.join(self.workspace_dir, 'l_avail.tif')
l_sum_path = os.path.join(self.workspace_dir, 'l_sum.tif')
stream_path = os.path.join(self.workspace_dir, 'stream.tif')
srs = osr.SpatialReference()
srs.ImportFromEPSG(26910) # UTM Zone 10N
project_wkt = srs.ExportToWkt()
# write all the test arrays to raster files
for array, path in [(flow_dir_mfd, flow_dir_mfd_path),
(l, l_path),
(l_avail, l_avail_path),
(l_sum, l_sum_path),
(stream_mask, stream_path)]:
pygeoprocessing.numpy_array_to_raster(
array, 0, (1, -1), (1180000, 690000), project_wkt, path)
target_b_path = os.path.join(self.workspace_dir, 'b.tif')
target_b_sum_path = os.path.join(self.workspace_dir, 'b_sum.tif')
seasonal_water_yield_core.route_baseflow_sum(flow_dir_mfd_path, l_path,
l_avail_path, l_sum_path,
stream_path, target_b_path,
target_b_sum_path)
actual_b = pygeoprocessing.raster_to_numpy_array(target_b_path)
actual_b_sum = pygeoprocessing.raster_to_numpy_array(target_b_sum_path)
# note: obtained these arrays by running `route_baseflow_sum`
expected_b = numpy.array([[10.5, 0.9999998, 0],
[0.1422222, 2.2666667, 0]])
expected_b_sum = numpy.array([[16.916666, 1.8666663, 0],
[0.1422222, 2.5333333, 0]])
numpy.testing.assert_allclose(actual_b, expected_b, equal_nan=True,
err_msg="Baseflow raster values do not match.")
numpy.testing.assert_allclose(actual_b_sum, expected_b_sum, equal_nan=True,
err_msg="b_sum raster values do not match.")
def test_calculate_curve_number_raster(self):
"""test `_calculate_curve_number_raster`"""
from natcap.invest.seasonal_water_yield import seasonal_water_yield
# make small lulc raster
lulc_raster_path = os.path.join(self.workspace_dir, 'lulc.tif')
lulc_array = numpy.zeros((3, 3), dtype=numpy.int16)
lulc_array[1:, :] = 1
lulc_array[0, 0] = 2
make_raster_from_array(lulc_array, lulc_raster_path)
# make small soil raster
soil_group_path = os.path.join(self.workspace_dir, "soil_group.tif")
soil_groups = 4
soil_array = numpy.zeros((3, 3), dtype=numpy.int32)
for i, row in enumerate(soil_array):
row[:] = i % soil_groups + 1
make_raster_from_array(soil_array, soil_group_path)
# make biophysical table
biophysical_df = pandas.DataFrame([
{"lucode": 0, "Description": "lulc 1", "cn_a": 50,
"cn_b": 60, "cn_c": 0, "cn_d": 0},
{"lucode": 1, "Description": "lulc 2", "cn_a": 72,
"cn_b": 82, "cn_c": 0, "cn_d": 0},
{"lucode": 2, "Description": "lulc 3", "cn_a": 65,
"cn_b": 22, "cn_c": 1, "cn_d": 0}])
cn_path = os.path.join(self.workspace_dir, "cn.tif")
seasonal_water_yield._calculate_curve_number_raster(
lulc_raster_path, soil_group_path, biophysical_df, cn_path)
actual_cn = pygeoprocessing.raster_to_numpy_array(cn_path)
expected_cn = [[65, 50, 50], [82, 82, 82], [0, 0, 0]]
# obtained expected array by running _calculate_curve_number_raster
numpy.testing.assert_allclose(actual_cn, expected_cn, equal_nan=True,
err_msg="Curve Number raster values do not match.")
class SWYValidationTests(unittest.TestCase):
"""Tests for the SWY Model MODEL_SPEC and validation."""

View File

@ -1921,8 +1921,8 @@ class TestValidationFromSpec(unittest.TestCase):
with open(csv_path, 'w') as csv_file:
csv_file.write(textwrap.dedent(
"""\
"field_a"
1"""))
"field_a",
1,"""))
args = {
'some_number': 1,
'csv': csv_path,

View File

@ -274,7 +274,7 @@ class WindEnergyUnitTests(unittest.TestCase):
from natcap.invest import wind_energy
srs = osr.SpatialReference()
srs.ImportFromEPSG(3157) #UTM Zone 10N
srs.ImportFromEPSG(3157) # UTM Zone 10N
projection_wkt = srs.ExportToWkt()
origin = (443723.127327877911739, 4956546.905980412848294)
pos_x = origin[0]
@ -284,7 +284,7 @@ class WindEnergyUnitTests(unittest.TestCase):
fields = {'id': ogr.OFTReal}
attrs = [{'id': 1}]
# Square polygon that will overlap the 4 pixels of the raster in the
# Square polygon that will overlap the 4 pixels of the raster in the
# upper left corner
poly_geometry = [box(pos_x, pos_y - 17, pos_x + 17, pos_y)]
poly_vector_path = os.path.join(
@ -306,7 +306,7 @@ class WindEnergyUnitTests(unittest.TestCase):
dist_raster_path = os.path.join(self.workspace_dir, 'dist.tif')
# Call function to test given testing inputs
wind_energy._create_distance_raster(
base_raster_path, poly_vector_path, dist_raster_path,
base_raster_path, poly_vector_path, dist_raster_path,
self.workspace_dir)
# Compare the results
@ -348,11 +348,9 @@ class WindEnergyUnitTests(unittest.TestCase):
price_list = [0.10, 0.10, 0.10, 0.10, 0.10]
srs = osr.SpatialReference()
srs.ImportFromEPSG(3157) #UTM Zone 10N
srs.ImportFromEPSG(3157) # UTM Zone 10N
projection_wkt = srs.ExportToWkt()
origin = (443723.127327877911739, 4956546.905980412848294)
pos_x = origin[0]
pos_y = origin[1]
# Create harvested raster
harvest_val = 1000000
@ -360,8 +358,8 @@ class WindEnergyUnitTests(unittest.TestCase):
[[harvest_val, harvest_val + 1e5, harvest_val + 2e5,
harvest_val + 3e5, harvest_val + 4e5],
[harvest_val, harvest_val + 1e5, harvest_val + 2e5,
harvest_val + 3e5, harvest_val + 4e5],
], dtype=numpy.float32)
harvest_val + 3e5, harvest_val + 4e5]],
dtype=numpy.float32)
base_harvest_path = os.path.join(self.workspace_dir, 'harvest_raster.tif')
# Create raster to use for testing input
pygeoprocessing.numpy_array_to_raster(
@ -386,9 +384,9 @@ class WindEnergyUnitTests(unittest.TestCase):
# Compare the results that were "eye" tested.
desired_npv_array = numpy.array(
[[309332320.0, 348331200.0, 387330020.0, 426328930.0,
465327800.0],
465327800.0],
[309332320.0, 348331200.0, 387330020.0, 426328930.0,
465327800.0]], dtype=numpy.float32)
465327800.0]], dtype=numpy.float32)
actual_npv_array = pygeoprocessing.raster_to_numpy_array(
target_npv_raster_path)
numpy.testing.assert_allclose(actual_npv_array, desired_npv_array)
@ -402,6 +400,7 @@ class WindEnergyUnitTests(unittest.TestCase):
numpy.testing.assert_allclose(
actual_levelized_array, desired_levelized_array)
class WindEnergyRegressionTests(unittest.TestCase):
"""Regression tests for the Wind Energy module."""
@ -428,8 +427,8 @@ class WindEnergyRegressionTests(unittest.TestCase):
SAMPLE_DATA, 'global_wind_energy_parameters.csv'),
'turbine_parameters_path': os.path.join(
SAMPLE_DATA, '3_6_turbine.csv'),
'number_of_turbines': '80', # pass str to test casting
'min_depth': '3', # pass str to test casting
'number_of_turbines': '80', # pass str to test casting
'min_depth': '3', # pass str to test casting
'max_depth': 180,
'n_workers': -1
}
@ -534,13 +533,13 @@ class WindEnergyRegressionTests(unittest.TestCase):
args['max_distance'] = 200000
args['valuation_container'] = True
args['foundation_cost'] = 2000000
args['discount_rate'] = '0.07' # pass str to test casting
args['discount_rate'] = '0.07' # pass str to test casting
# Test that only grid points are provided in grid_points_path
args['grid_points_path'] = os.path.join(
SAMPLE_DATA, 'resampled_grid_pts.csv')
args['price_table'] = False
args['wind_price'] = 0.187
args['rate_change'] = '0.2' # pass str to test casting
args['rate_change'] = '0.2' # pass str to test casting
wind_energy.execute(args)

View File

@ -35,8 +35,10 @@ Workbench fixes/enhancements:
Everything else:
- General -->
<!-- :changelog: -->
<section id="unreleased-changes">
<h1>Unreleased Changes</h1>
<!-- Unreleased Changes
------------------ -->
<section id="section-1">
<h1>3.14.3 (2024-12-19)</h1>
<ul>
<li>
<dl>
@ -46,6 +48,10 @@ Everything else:
<li>InVEST has been updated to build against numpy 2. <a href="https://github.com/natcap/invest/issues/1641">https://github.com/natcap/invest/issues/1641</a></li>
<li>Updating validation to handle a change in exceptions raised by GDAL in <code>pygeoprocessing.get_raster_info</code> and <code>pygeoprocessing.get_vector_info</code>. <a href="https://github.com/natcap/invest/issues/1645">https://github.com/natcap/invest/issues/1645</a></li>
<li>The scripting page of the InVEST API documentation has been updated to reflect changes in how InVEST is installed on modern systems, and also to include images of the InVEST workbench instead of just broken links. <a href="https://github.com/natcap/invest/issues/1660">https://github.com/natcap/invest/issues/1660</a></li>
<li>Updated translations for Spanish and Chinese</li>
<li>natcap.invest now works with (and requires) <code>gdal.UseExceptions</code>. A <code>FutureWarning</code> is raised on import if GDAL exceptions are not enabled.</li>
<li>Fixed an issue on Windows where GDAL fails to find its DLLs due to an interfering GDAL installation on the PATH, such as from anaconda. <a href="https://github.com/natcap/invest/issues/1643">https://github.com/natcap/invest/issues/1643</a></li>
<li>Improved error handling of NA values in raster reclassification to provide a more descriptive message.</li>
</ul>
</dd>
</dl>
@ -92,6 +98,29 @@ Everything else:
<ul>
<li>Access raster is now generated from the reprojected access vector (<a href="https://github.com/natcap/invest/issues/1615">https://github.com/natcap/invest/issues/1615</a>).</li>
<li>Rarity values are now output in CSV format (as well as in raster format) (<a href="https://github.com/natcap/invest/issues/721">https://github.com/natcap/invest/issues/721</a>).</li>
<li>Improved error handling when there is a missing LULC value in the sensitivity table (<a href="https://github.com/natcap/invest/issues/1671">https://github.com/natcap/invest/issues/1671</a>).</li>
</ul>
</dd>
</dl>
</li>
<li>
<dl>
<dt>Pollination</dt>
<dd>
<ul>
<li>Fixed an issue with nodata handling that was causing some outputs to be filled either with the float32 value for positive infinity, or else with a value very close to it. <a href="https://github.com/natcap/invest/issues/1635">https://github.com/natcap/invest/issues/1635</a></li>
<li>While working on <a href="https://github.com/natcap/invest/issues/1635">https://github.com/natcap/invest/issues/1635</a>, we also updated the stated dtype of most pollination model outputs to be float32 instead of the float64 dtype that was being assumed previously. This will result in smaller output filesizes with minimal loss of precision.</li>
</ul>
</dd>
</dl>
</li>
<li>
<dl>
<dt>Seasonal Water Yield</dt>
<dd>
<ul>
<li>Added support for zero padding in month numbers in ET and precipitation file names (i.e., users can now name their file Precip_01.tif). (<a href="https://github.com/natcap/invest/issues/1166">https://github.com/natcap/invest/issues/1166</a>)</li>
<li>Fixed a bug where <code>numpy.nan</code> pixel values would not be correctly detected as nodata in local recharge and baseflow routing functions. (<a href="https://github.com/natcap/invest/issues/1705">https://github.com/natcap/invest/issues/1705</a>)</li>
</ul>
</dd>
</dl>
@ -118,7 +147,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-1">
<section id="section-2">
<h1>3.14.2 (2024-05-29)</h1>
<ul>
<li>
@ -239,7 +268,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-2">
<section id="section-3">
<h1>3.14.1 (2023-12-18)</h1>
<ul>
<li>
@ -370,7 +399,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-3">
<section id="section-4">
<h1>3.14.0 (2023-09-08)</h1>
<ul>
<li>
@ -575,7 +604,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-4">
<section id="section-5">
<h1>3.13.0 (2023-03-17)</h1>
<ul>
<li>
@ -708,7 +737,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-5">
<section id="section-6">
<h1>3.12.1 (2022-12-16)</h1>
<ul>
<li>
@ -781,7 +810,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-6">
<section id="section-7">
<h1>3.12.0 (2022-08-31)</h1>
<ul>
<li>
@ -905,7 +934,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-7">
<section id="section-8">
<h1>3.11.0 (2022-05-24)</h1>
<ul>
<li>
@ -1003,7 +1032,7 @@ Everything else:
</li>
</ul>
</section>
<section id="section-8">
<section id="section-9">
<h1>3.10.2 (2022-02-08)</h1>
<ul>
<li>
@ -1105,7 +1134,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-9">
<section id="section-10">
<h1>3.10.1 (2022-01-06)</h1>
<ul>
<li>
@ -1120,7 +1149,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-10">
<section id="section-11">
<h1>3.10.0 (2022-01-04)</h1>
<ul>
<li>
@ -1304,7 +1333,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-11">
<section id="section-12">
<h1>3.9.2 (2021-10-29)</h1>
<ul>
<li>
@ -1365,7 +1394,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-12">
<section id="section-13">
<h1>3.9.1 (2021-09-22)</h1>
<ul>
<li>
@ -1514,7 +1543,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-13">
<section id="section-14">
<h1>3.9.0 (2020-12-11)</h1>
<ul>
<li>
@ -1702,7 +1731,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-14">
<section id="section-15">
<h1>3.8.9 (2020-09-15)</h1>
<ul>
<li>
@ -1727,7 +1756,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-15">
<section id="section-16">
<h1>3.8.8 (2020-09-04)</h1>
<ul>
<li>
@ -1827,7 +1856,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-16">
<section id="section-17">
<h1>3.8.7 (2020-07-17)</h1>
<ul>
<li>
@ -1866,7 +1895,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-17">
<section id="section-18">
<h1>3.8.6 (2020-07-03)</h1>
<ul>
<li>
@ -1881,7 +1910,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-18">
<section id="section-19">
<h1>3.8.5 (2020-06-26)</h1>
<ul>
<li>
@ -1932,7 +1961,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-19">
<section id="section-20">
<h1>3.8.4 (2020-06-05)</h1>
<ul>
<li>
@ -1967,7 +1996,7 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-20">
<section id="section-21">
<h1>3.8.3 (2020-05-29)</h1>
<ul>
<li>
@ -1982,13 +2011,13 @@ setuptools_scm</code> from the project root.</li>
</li>
</ul>
</section>
<section id="section-21">
<section id="section-22">
<h1>3.8.2 (2020-05-15)</h1>
<ul>
<li>InVEST's CSV encoding requirements are now described in the validation error message displayed when a CSV cannot be opened.</li>
</ul>
</section>
<section id="section-22">
<section id="section-23">
<h1>3.8.1 (2020-05-08)</h1>
<ul>
<li>Fixed a compilation issue on Mac OS X Catalina.</li>
@ -2012,7 +2041,7 @@ setuptools_scm</code> from the project root.</li>
<li>Update api-docs conf file to mock sdr.sdr_core and to use updated unittest mock</li>
</ul>
</section>
<section id="section-23">
<section id="section-24">
<h1>3.8.0 (2020-02-07)</h1>
<ul>
<li>Created a sub-directory for the sample data in the installation directory.</li>
@ -2068,7 +2097,7 @@ setuptools_scm</code> from the project root.</li>
<li>Added a new InVEST model: Urban Cooling Model.</li>
</ul>
</section>
<section id="section-24">
<section id="section-25">
<h1>3.7.0 (2019-05-09)</h1>
<ul>
<li>Refactoring Coastal Vulnerability (CV) model. CV now uses TaskGraph and Pygeoprocessing &gt;=1.6.1. The model is now largely vector-based instead of raster-based. Fewer input datasets are required for the same functionality. Runtime in sycnhronous mode is similar to previous versions, but runtime can be reduced with multiprocessing. CV also supports avoided recomputation for successive runs in the same workspace, even if a different file suffix is used. Output vector files are in CSV and geopackage formats.</li>
@ -2089,7 +2118,7 @@ setuptools_scm</code> from the project root.</li>
<li>Adding encoding='utf-8-sig' to pandas.read_csv() to support utils.build_lookup_from_csv() to read CSV files encoded with UTF-8 BOM (byte-order mark) properly.</li>
</ul>
</section>
<section id="section-25">
<section id="section-26">
<h1>3.6.0 (2019-01-30)</h1>
<ul>
<li>Correcting an issue with the InVEST Carbon Storage and Sequestration model where filepaths containing non-ASCII characters would cause the model's report generation to crash. The output report is now a UTF-8 document.</li>
@ -2119,7 +2148,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixing a case where a zero discount rate and rate of change in the carbon model would cause a divide by zero error.</li>
</ul>
</section>
<section id="section-26">
<section id="section-27">
<h1>3.5.0 (2018-08-14)</h1>
<ul>
<li>Bumped pygeoprocessing requirement to <code>pygeoprocessing&gt;=1.2.3</code>.</li>
@ -2141,7 +2170,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue in the model data of the crop production model where some crops were using incorrect climate bin rasters. Since the error was in the data and not the code, users will need to download the most recent version of InVEST's crop model data during the installation step to get the fix.</li>
</ul>
</section>
<section id="section-27">
<section id="section-28">
<h1>3.4.4 (2018-03-26)</h1>
<ul>
<li>InVEST now requires GDAL 2.0.0 and has been tested up to GDAL 2.2.3. Any API users of InVEST will need to use GDAL version &gt;= 2.0. When upgrading GDAL we noticed slight numerical differences in our test suite in both numerical raster differences, geometry transforms, and occasionally a single pixel difference when using <cite>gdal.RasterizeLayer</cite>. Each of these differences in the InVEST test suite is within a reasonable numerical tolerance and we have updated our regression test suite appropriately. Users comparing runs between previous versions of InVEST may also notice reasonable numerical differences between runs.</li>
@ -2151,7 +2180,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed a broken link to local and online user documentation from the Seasonal Water Yield model from the model's user interface.</li>
</ul>
</section>
<section id="section-28">
<section id="section-29">
<h1>3.4.3 (2018-03-26)</h1>
<ul>
<li>Fixed a critical issue in the carbon model UI that would incorrectly state the user needed a "REDD Priority Raster" when none was required.</li>
@ -2159,7 +2188,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue in wind energy UI that was incorrectly validating most of the inputs.</li>
</ul>
</section>
<section id="section-29">
<section id="section-30">
<h1>3.4.2 (2017-12-15)</h1>
<ul>
<li>Fixed a cross-platform issue with the UI where logfiles could not be dropped onto UI windows.</li>
@ -2169,7 +2198,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixing an issue with the <code>FileSystemRunDialog</code> where pressing the 'X' button in the corner of the window would close the window, but not reset its state. The window's state is now reset whenever the window is closed (and the window cannot be closed when the model is running)</li>
</ul>
</section>
<section id="section-30">
<section id="section-31">
<h1>3.4.1 (2017-12-11)</h1>
<ul>
<li>In the Coastal Blue Carbon model, the <code>interest_rate</code> parameter has been renamed to <code>inflation_rate</code>.</li>
@ -2177,7 +2206,7 @@ setuptools_scm</code> from the project root.</li>
<li>Added better error checking to the SDR model for missing <cite>ws_id</cite> and invalid <cite>ws_id</cite> values such as <cite>None</cite> or some non-integer value. Also added tests for the <cite>SDR</cite> validation module.</li>
</ul>
</section>
<section id="section-31">
<section id="section-32">
<h1>3.4.0 (2017-12-03)</h1>
<ul>
<li>Fixed an issue with most InVEST models where the suffix was not being reflected in the output filenames. This was due to a bug in the InVEST UI, where the suffix args key was assumed to be <code>'suffix'</code>. Instances of <code>InVESTModel</code> now accept a keyword argument to defined the suffix args key.</li>
@ -2209,7 +2238,7 @@ setuptools_scm</code> from the project root.</li>
<li>Updated the erodibility sample raster that ships with InVEST for the SDR model. The old version was in US units, in this version we convert to SI units as the model requires, and clipped the raster to the extents of the other stack to save disk space.</li>
</ul>
</section>
<section id="section-32">
<section id="section-33">
<h1>3.3.3 (2017-02-06)</h1>
<ul>
<li>Fixed an issue in the UI where the carbon model wouldn't accept negative numbers in the price increase of carbon.</li>
@ -2229,7 +2258,7 @@ setuptools_scm</code> from the project root.</li>
<li>Updated branding and usability of the InVEST installer for Windows, and the Mac Disk Image (.dmg).</li>
</ul>
</section>
<section id="section-33">
<section id="section-34">
<h1>3.3.2 (2016-10-17)</h1>
<ul>
<li>Partial test coverage for HRA model.</li>
@ -2268,7 +2297,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue in SDR that reported runtime overflow errors during normal processing even though the model completed without other errors.</li>
</ul>
</section>
<section id="section-34">
<section id="section-35">
<h1>3.3.1 (2016-06-13)</h1>
<ul>
<li>Refactored API documentation for readability, organization by relevant topics, and to allow docs to build on <a href="http://invest.readthedocs.io">invest.readthedocs.io</a>,</li>
@ -2294,7 +2323,7 @@ setuptools_scm</code> from the project root.</li>
<li>Updated Crop Production model to add a simplified UI, faster runtime, and more testing.</li>
</ul>
</section>
<section id="section-35">
<section id="section-36">
<h1>3.3.0 (2016-03-14)</h1>
<ul>
<li>Refactored Wind Energy model to use a CSV input for wind data instead of a Binary file.</li>
@ -2348,7 +2377,7 @@ setuptools_scm</code> from the project root.</li>
<li>Documentation to the GLOBIO code base including the large docstring for 'execute'.</li>
</ul>
</section>
<section id="section-36">
<section id="section-37">
<h1>3.2.0 (2015-05-31)</h1>
<p>InVEST 3.2.0 is a major release with the addition of several experimental models and tools as well as an upgrade to the PyGeoprocessing core:</p>
<ul>
@ -2361,11 +2390,11 @@ setuptools_scm</code> from the project root.</li>
<li>Miscelaneous performance patches and bug fixes.</li>
</ul>
</section>
<section id="section-37">
<section id="section-38">
<h1>3.1.3 (2015-04-23)</h1>
<p>InVEST 3.1.3 is a hotfix release patching a memory blocking issue resolved in PyGeoprocessing version 0.2.1. Users might have experienced slow runtimes on SDR or other routed models.</p>
</section>
<section id="section-38">
<section id="section-39">
<h1>3.1.2 (2015-04-15)</h1>
<p>InVEST 3.1.2 is a minor release patching issues mostly related to the freshwater routing models and signed GDAL Byte datasets.</p>
<ul>
@ -2381,7 +2410,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue in the Blue Carbon model that prevented the report from being generated in the outputs file.</li>
</ul>
</section>
<section id="section-39">
<section id="section-40">
<h1>3.1.1 (2015-03-13)</h1>
<p>InVEST 3.1.1 is a major performance and memory bug patch to the InVEST toolsuite. We recommend all users upgrade to this version.</p>
<ul>
@ -2404,7 +2433,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed a bug in Habitat Quality where the future output "quality_out_f.tif" was not reflecting the habitat value given in the sensitivity table for the specified landcover types.</li>
</ul>
</section>
<section id="section-40">
<section id="section-41">
<h1>3.1.0 (2014-11-19)</h1>
<p>InVEST 3.1.0 (<a href="http://www.naturalcapitalproject.org/download.html">http://www.naturalcapitalproject.org/download.html</a>) is a major software and science milestone that includes an overhauled sedimentation model, long awaited fixes to exponential decay routines in habitat quality and pollination, and a massive update to the underlying hydrological routing routines. The updated sediment model, called SDR (sediment delivery ratio), is part of our continuing effort to improve the science and capabilities of the InVEST tool suite. The SDR model inputs are backwards comparable with the InVEST 3.0.1 sediment model with two additional global calibration parameters and removed the need for the retention efficiency parameter in the biophysical table; most users can run SDR directly with the data they have prepared for previous versions. The biophysical differences between the models are described in a section within the SDR user's guide and represent a superior representation of the hydrological connectivity of the watershed, biophysical parameters that are independent of cell size, and a more accurate representation of sediment retention on the landscape. Other InVEST improvements to include standard bug fixes, performance improvements, and usability features which in part are described below:</p>
<ul>
@ -2445,7 +2474,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue where the data type of the nodata value in a raster might be different than the values in the raster. This was common in the case of 64 bit floating point values as nodata when the underlying raster was 32 bit. Now nodata values are cast to the underlying types which improves the reliability of many of the InVEST models.</li>
</ul>
</section>
<section id="section-41">
<section id="section-42">
<h1>3.0.1 (2014-05-19)</h1>
<ul>
<li>Blue Carbon model released.</li>
@ -2467,7 +2496,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed an issue in Marine Water Quality where the UV points were supposed to be optional, but instead raised an exception when not passed in.</li>
</ul>
</section>
<section id="section-42">
<section id="section-43">
<h1>3.0.0 (2014-03-23)</h1>
<p>The 3.0.0 release of InVEST represents a shift away from the ArcGIS to the InVEST standalone computational platform. The only exception to this shift is the marine coastal protection tier 1 model which is still supported in an ArcGIS toolbox and has no InVEST 3.0 standalone at the moment. Specific changes are detailed below</p>
<ul>
@ -2486,7 +2515,7 @@ setuptools_scm</code> from the project root.</li>
<li>Numerous other minor bug fixes and performance enhacnements.</li>
</ul>
</section>
<section id="section-43">
<section id="section-44">
<h1>2.6.0 (2013-12-16)</h1>
<p>The 2.6.0 release of InVEST removes most of the old InVEST models from the Arc toolbox in favor of the new InVEST standalone models. While we have been developing standalone equivalents for the InVEST Arc models since version 2.3.0, this is the first release in which we removed support for the deprecated ArcGIS versions after an internal review of correctness, performance, and stability on the standalones. Additionally, this is one of the last milestones before the InVEST 3.0.0 release later next year which will transition InVEST models away from strict ArcGIS dependence to a standalone form.</p>
<p>Specifically, support for the following models have been moved from the ArcGIS toolbox to their Windows based standalones: (1) hydropower/water yield, (2) finfish aquaculture, (3) coastal protection tier 0/coastal vulnerability, (4) wave energy, (5) carbon, (6) habitat quality/biodiversity, (7) pollination, (8) timber, and (9) overlap analysis. Additionally, documentation references to ArcGIS for those models have been replaced with instructions for launching standalone InVEST models from the Windows start menu.</p>
@ -2509,7 +2538,7 @@ setuptools_scm</code> from the project root.</li>
<li>Changing support from <a href="mailto:richsharp@stanford.edu">richsharp@stanford.edu</a> to the user support forums at <a href="http://ncp-yamato.stanford.edu/natcapforums">http://ncp-yamato.stanford.edu/natcapforums</a>.</li>
</ul>
</section>
<section id="section-44">
<section id="section-45">
<h1>2.5.6 (2013-09-06)</h1>
<p>The 2.5.6 release of InVEST that addresses minor bugs, performance tweaks, and new functionality of the InVEST standalone models. Including:</p>
<ul>
@ -2535,7 +2564,7 @@ setuptools_scm</code> from the project root.</li>
<li>Added an infrastructure feature so that temporary files are created in the user's workspace rather than at the system level folder. This lets users work in a secondary workspace on a USB attached hard drive and use the space of that drive, rather than the primary operating system drive.</li>
</ul>
</section>
<section id="section-45">
<section id="section-46">
<h1>2.5.5 (2013-08-06)</h1>
<p>The 2.5.5 release of InVEST that addresses minor bugs, performance tweaks, and new functionality of the InVEST standalone models. Including:</p>
<blockquote>
@ -2629,7 +2658,7 @@ setuptools_scm</code> from the project root.</li>
</ul>
</blockquote>
</section>
<section id="section-46">
<section id="section-47">
<h1>2.5.4 (2013-06-07)</h1>
<p>This is a minor release of InVEST that addresses numerous minor bugs and performance tweaks in the InVEST 3.0 models. Including:</p>
<blockquote>
@ -2679,15 +2708,15 @@ setuptools_scm</code> from the project root.</li>
</ul>
</blockquote>
</section>
<section id="section-47">
<section id="section-48">
<h1>2.5.3 (2013-03-21)</h1>
<p>This is a minor release of InVEST that fixes an issue with the HRA model that caused ArcGIS versions of the model to fail when calculating habitat maps for risk hotspots. This upgrade is strongly recommended for users of InVEST 2.5.1 or 2.5.2.</p>
</section>
<section id="section-48">
<section id="section-49">
<h1>2.5.2 (2013-03-17)</h1>
<p>This is a minor release of InVEST that fixes an issue with the HRA sample data that caused ArcGIS versions of the model to fail on the training data. There is no need to upgrade for most users unless you are doing InVEST training.</p>
</section>
<section id="section-49">
<section id="section-50">
<h1>2.5.1 (2013-03-12)</h1>
<p>This is a minor release of InVEST that does not add any new models, but does add additional functionality, stability, and increased performance to one of the InVEST 3.0 standalones:</p>
<blockquote>
@ -2706,7 +2735,7 @@ setuptools_scm</code> from the project root.</li>
</blockquote>
<p>Additionally, this minor release fixes a bug in the InVEST user interface where collapsible containers became entirely non-interactive.</p>
</section>
<section id="section-50">
<section id="section-51">
<h1>2.5.0 (2013-03-08)</h1>
<p>This is a major release of InVEST that includes new standalone versions (ArcGIS is not required) of our models as well as additional functionality, stability, and increased performance to many of the existing models. This release is timed to support our group's annual training event at Stanford University. We expect to release InVEST 2.5.1 a couple of weeks after to address any software issues that arise during the training. See the release notes below for details of the release, and please contact <a href="mailto:richsharp@stanford.edu">richsharp@stanford.edu</a> for any issues relating to software:</p>
<blockquote>
@ -2793,7 +2822,7 @@ setuptools_scm</code> from the project root.</li>
</blockquote>
</blockquote>
</section>
<section id="section-51">
<section id="section-52">
<h1>2.4.5 (2013-02-01)</h1>
<p>This is a minor release of InVEST that does not add any new models, but does add additional functionality, stability, and increased performance to many of the InVEST 3.0 standalones:</p>
<blockquote>
@ -2853,7 +2882,7 @@ setuptools_scm</code> from the project root.</li>
</ul>
</blockquote>
</section>
<section id="section-52">
<section id="section-53">
<h1>2.4.4 (2012-10-24)</h1>
<ul>
<li>Fixes memory errors experienced by some users in the Carbon Valuation 3.0 Beta model.</li>
@ -2861,7 +2890,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixes an issue importing packages for some officially-unreleased InVEST models.</li>
</ul>
</section>
<section id="section-53">
<section id="section-54">
<h1>2.4.3 (2012-10-19)</h1>
<ul>
<li>Fixed a minor issue with hydropower output valuation rasters whose statistics were not pre-calculated. This would cause the range in ArcGIS to show the rasters at -3e38 to 3e38.</li>
@ -2872,20 +2901,20 @@ setuptools_scm</code> from the project root.</li>
<li>Added a feature to all InVEST 3.0 models to list disk usage before and after each run and in most cases report a low free space error if relevant.</li>
</ul>
</section>
<section id="section-54">
<section id="section-55">
<h1>2.4.2 (2012-10-15)</h1>
<ul>
<li>Fixed an issue with the ArcMap document where the paths to default data were not saved as relative paths. This caused the default data in the document to not be found by ArcGIS.</li>
<li>Introduced some more memory-efficient processing for Biodiversity 3.0 Beta. This fixes an out-of-memory issue encountered by some users when using very large raster datasets as inputs.</li>
</ul>
</section>
<section id="section-55">
<section id="section-56">
<h1>2.4.1 (2012-10-08)</h1>
<ul>
<li>Fixed a compatibility issue with ArcGIS 9.3 where the ArcMap and ArcToolbox were unable to be opened by Arc 9.3.</li>
</ul>
</section>
<section id="section-56">
<section id="section-57">
<h1>2.4.0 (2012-10-05)</h1>
<p>Changes in InVEST 2.4.0</p>
<p>General:</p>
@ -2965,7 +2994,7 @@ setuptools_scm</code> from the project root.</li>
<li>Fixed a visualization bug with wave energy where output rasters did not have the min/max/stdev calculations on them. This made the default visualization in Arc a gray blob.</li>
</ul>
</section>
<section id="section-57">
<section id="section-58">
<h1>2.3.0 (2012-08-02)</h1>
<p>Changes in InVEST 2.3.0</p>
<p>General:</p>
@ -3031,7 +3060,7 @@ setuptools_scm</code> from the project root.</li>
<li>Other minor bug fixes and runtime performance tweaks in the 3.0 framework.</li>
</ul>
</section>
<section id="section-58">
<section id="section-59">
<h1>2.2.2 (2012-03-03)</h1>
<p>Changes in InVEST 2.2.2</p>
<p>General:</p>
@ -3052,14 +3081,14 @@ setuptools_scm</code> from the project root.</li>
<dd>toolbox if the workspace name is too long.</dd>
</dl>
</section>
<section id="section-59">
<section id="section-60">
<h1>2.2.1 (2012-01-26)</h1>
<p>Changes in InVEST 2.2.1</p>
<p>General:</p>
<p>This is a minor release which fixes the following defects:</p>
<p>-A variety of miscellaneous bugs were fixed that were causing crashes of the Coastal Protection model in Arc 9.3. -Fixed an issue in the Pollination model that was looking for an InVEST1005 directory. -The InVEST "models only" release had an entry for the InVEST 3.0 Beta tools, but was missing the underlying runtime. This has been added to the models only 2.2.1 release at the cost of a larger installer. -The default InVEST ArcMap document wouldn't open in ArcGIS 9.3. It can now be opened by Arc 9.3 and above. -Minor updates to the Coastal Protection user's guide.</p>
</section>
<section id="section-60">
<section id="section-61">
<h1>2.2.0 (2011-12-22)</h1>
<p>In this release we include updates to the habitat risk assessment model, updates to Coastal Vulnerability Tier 0 (previously named Coastal Protection), and a new tier 1 Coastal Vulnerability tool. Additionally, we are releasing a beta version of our 3.0 platform that includes the terrestrial timber and carbon models.</p>
<p>See the "Marine Models" and "InVEST 3.0 Beta" sections below for more details.</p>
@ -3113,7 +3142,7 @@ setuptools_scm</code> from the project root.</li>
</dd>
</dl>
</section>
<section id="section-61">
<section id="section-62">
<h1>2.1.1 (2011-10-17)</h1>
<p>Changes in InVEST 2.1.1</p>
<p>General:</p>

View File

@ -1,6 +1,6 @@
{
"name": "invest-workbench",
"version": "3.14.2",
"version": "3.14.3",
"description": "Models that map and value the goods and services from nature that sustain and fulfill human life",
"main": "build/main/main.js",
"homepage": "./",
@ -104,4 +104,4 @@
"vite": "^4.3.9",
"yazl": "^2.5.1"
}
}
}