Source code for bw2data.weighting_normalization
from bw2data.backends.schema import get_id
from bw2data.ia_data_store import ImpactAssessmentDataStore
from bw2data.meta import normalizations, weightings
from bw2data.utils import as_uncertainty_dict
from bw2data.validate import normalization_validator, weighting_validator
class Weighting(ImpactAssessmentDataStore):
    """
    LCIA weighting data - used to combine or compare different impact categories.

    The data schema for weighting is a one-element list:

    .. code-block:: python

        Schema(All(
            [uncertainty_dict],
            Length(min=1, max=1)
        ))

    """
    _metadata = weightings  # metadata registry checked by ``register()`` and ``write()``
    validator = weighting_validator
    matrix = "weighting_matrix"

    def write(self, data):
        """Write ``data``, which, because of ``DataStore`` assumptions, must be a one-element list."""
        # Register on first write, then enforce the one-element list constraint
        if self.name not in self._metadata:
            self.register()
        if not isinstance(data, list) or len(data) != 1:
            raise ValueError("Weighting data must be a one-element list")
        super().write(data)

    def process_row(self, row):
        """Return the weighting uncertainty dictionary, with the matrix row index fixed at 0."""
        return {**as_uncertainty_dict(row), "row": 0}
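
# A minimal usage sketch for ``Weighting`` (hypothetical store name; assumes an
# initialized Brightway project). ``write()`` registers the store if needed and
# enforces the one-element list; the element is an uncertainty dictionary (a
# plain number also works, since ``process_row`` passes each row through
# ``as_uncertainty_dict``):
#
#     weighting = Weighting("my weighting")
#     weighting.write([{"amount": 42.0}])
#     weighting.process_row({"amount": 42.0})  # roughly {"amount": 42.0, "row": 0}
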
class Normalization(ImpactAssessmentDataStore):
    """
    LCIA normalization data - used to transform meaningful units, like mass or damage, into reference units such as "person-equivalents".

    The data schema for IA normalization is:

    .. code-block:: python

        Schema([
            [valid_tuple, maybe_uncertainty]
        ])

    where:

    * ``valid_tuple`` is a dataset identifier, like ``("biosphere", "CO2")``
    * ``maybe_uncertainty`` is either a number or an uncertainty dictionary

    """
    _metadata = normalizations
    validator = normalization_validator
    matrix = "normalization_matrix"

    def process_row(self, row):
        """Given ``(flow key, amount)``, return a dictionary for array insertion."""
        return {
            **as_uncertainty_dict(row[1]),
            "row": get_id(row[0]),
        }
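
# A minimal usage sketch for ``Normalization`` (hypothetical flow keys; assumes
# the referenced datasets exist in an initialized project). Each row pairs a
# dataset identifier with a number or an uncertainty dictionary; ``process_row``
# resolves the identifier to an integer matrix row index via ``get_id``:
#
#     normalization = Normalization("person equivalents")
#     normalization.register()
#     normalization.write([
#         (("biosphere", "CO2"), 1.0),
#         (("biosphere", "CH4"), {"amount": 25.0}),
#     ])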