1"""
2This source file contains the Lake Package and interface objects to the lake
3package. Usage: create instances of the LakeData class, and optionally
4instances of the Outlets class, and use the method "from_lakes_and_outlets" to
5create a lake package.
6"""

import pathlib
import textwrap
from collections import defaultdict

import jinja2
import numpy as np
import pandas as pd
import xarray as xr

from imod import mf6
from imod.logging import init_log_decorator
from imod.mf6.boundary_condition import BoundaryCondition
from imod.mf6.package import Package
from imod.mf6.pkgbase import PackageBase
from imod.mf6.write_context import WriteContext
from imod.schemata import AllValueSchema, DimsSchema, DTypeSchema

CONNECTION_DIM = "connection_dim"
OUTLET_DIM = "outlet_dim"
LAKE_DIM = "lake_dim"


class LakeApi_Base(PackageBase):
    """
    Base class for lake and outlet objects.
    """

    def __init__(self, allargs):
        super().__init__(allargs)


class LakeData(LakeApi_Base):
    """
    This class is used to initialize the lake package. It contains data
    relevant to one lake. The input needed to create an instance of this
    class consists of a few scalars (name, starting stage), some xarray
    data-arrays, and time series. The xarray data-arrays should be of the
    same size as the grid, and contain missing values in all locations where
    the lake does not exist (similar to the input of the other grid-based
    packages, such as ConstantHead, River, GeneralHeadBoundary, etc.).

    Parameters
    ----------
    starting_stage: float,
    boundname: str,
    connection_type: xr.DataArray of integers.
    bed_leak: xr.DataArray of floats.
    top_elevation: xr.DataArray of floats.
    bot_elevation: xr.DataArray of floats.
    connection_length: xr.DataArray of floats.
    connection_width: xr.DataArray of floats.
    status,
    stage: timeseries of float numbers
    rainfall: timeseries of float numbers
    evaporation: timeseries of float numbers
    runoff: timeseries of float numbers
    inflow: timeseries of float numbers
    withdrawal: timeseries of float numbers
    auxiliary: timeseries of float numbers
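
    Examples
    --------
    A minimal sketch (the grid, the ``idomain`` mask, and all values are
    hypothetical; cells where the arrays hold NaN are not part of the lake)::

        >>> is_lake = idomain.sel(layer=1) == 1  # boolean mask of lake cells
        >>> lake = LakeData(
        ...     starting_stage=10.0,
        ...     boundname="first_lake",
        ...     connection_type=xr.where(is_lake, 1, np.nan),  # 1: "vertical"
        ...     bed_leak=xr.where(is_lake, 0.2, np.nan),
        ...     top_elevation=xr.where(is_lake, 1.0, np.nan),
        ...     bot_elevation=xr.where(is_lake, 0.0, np.nan),
        ...     connection_length=xr.where(is_lake, 1.0, np.nan),
        ...     connection_width=xr.where(is_lake, 1.0, np.nan),
        ... )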
69 """

    timeseries_names = [
        "status",
        "stage",
        "rainfall",
        "evaporation",
        "runoff",
        "inflow",
        "withdrawal",
        "auxiliary",
    ]

    def __init__(
        self,
        starting_stage: float,
        boundname: str,
        connection_type,
        bed_leak=None,
        top_elevation=None,
        bot_elevation=None,
        connection_length=None,
        connection_width=None,
        status=None,
        stage=None,
        rainfall=None,
        evaporation=None,
        runoff=None,
        inflow=None,
        withdrawal=None,
        auxiliary=None,
        lake_table=None,
    ):
        dict_dataset = {
            "starting_stage": starting_stage,
            "boundname": boundname,
            "connection_type": (connection_type.dims, connection_type.values),
            "bed_leak": (bed_leak.dims, bed_leak.values),
            "top_elevation": (top_elevation.dims, top_elevation.values),
            "bottom_elevation": (bot_elevation.dims, bot_elevation.values),
            "connection_length": (connection_length.dims, connection_length.values),
            "connection_width": (connection_width.dims, connection_width.values),
            "lake_table": lake_table,
        }
        super().__init__(dict_dataset)

        # timeseries data
        times = []
        timeseries_dict = {
            "status": status,
            "stage": stage,
            "rainfall": rainfall,
            "evaporation": evaporation,
            "runoff": runoff,
            "inflow": inflow,
            "withdrawal": withdrawal,
            "auxiliary": auxiliary,
        }
        for _, timeseries in timeseries_dict.items():
            if timeseries is not None:
                if "time" in timeseries.coords:
                    times.extend([x for x in timeseries.coords["time"].values])
        times = sorted(set(times))
        # assign_coords returns a new Dataset, so re-assign to keep the coordinate.
        self.dataset = self.dataset.assign_coords({"time": times})
        for ts_name, ts_object in timeseries_dict.items():
            if ts_object is not None:
                fillvalue: float | str = np.nan
                if not pd.api.types.is_numeric_dtype(ts_object.dtype):
                    fillvalue = ""
                self.dataset[ts_name] = ts_object.reindex(
                    {"time": times}, fill_value=fillvalue
                )
            else:
                self.dataset[ts_name] = None


class OutletBase(LakeApi_Base):
    """
    Base class for the different kinds of outlets
    """

    timeseries_names = ["rate", "invert", "rough", "width", "slope"]

    def __init__(self, allargs):
        super().__init__(allargs)


class OutletManning(OutletBase):
    """
    Lake outlet which discharges via a rectangular outlet that uses Manning's
    equation.
    """

    _couttype = "manning"

    def __init__(
        self,
        lakein: str,
        lakeout: str,
        invert,
        width,
        roughness,
        slope,
    ):
        dict_dataset = {
            "lakein": lakein,
            "lakeout": lakeout,
            "invert": invert,
            "width": width,
            "roughness": roughness,
            "slope": slope,
        }
        super().__init__(dict_dataset)


class OutletWeir(OutletBase):
    """
    Lake outlet which discharges via a sharp-crested weir.
    """

    _couttype = "weir"

    def __init__(self, lakein: str, lakeout: str, invert, width):
        dict_dataset = {
            "lakein": lakein,
            "lakeout": lakeout,
            "invert": invert,
            "width": width,
            "roughness": None,
            "slope": None,
        }
        super().__init__(dict_dataset)


class OutletSpecified(OutletBase):
    """
    Lake outlet which discharges a specified outflow.
    """

    _couttype = "specified"

    def __init__(self, lakein: str, lakeout: str, rate):
        dict_dataset = {
            "lakein": lakein,
            "lakeout": lakeout,
            "invert": None,
            "width": None,
            "roughness": None,
            "slope": None,
            "rate": rate,
        }
        super().__init__(dict_dataset)
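

# Example outlet definitions (an illustrative sketch; the lake names are
# hypothetical and must match the LakeData boundnames; an empty lakeout
# means the outlet discharges to an external boundary):
#
#     manning = OutletManning("first_lake", "second_lake", 1.0, 2.0, 3.0, 4.0)
#     weir = OutletWeir("first_lake", "", invert=1.0, width=2.0)
#     specified = OutletSpecified("first_lake", "", rate=0.5)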


def create_connection_data(lakes):
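    # Gathers, per lake, the cells where "connection_type" is not null, and
    # returns a dict of 1-D DataArrays along CONNECTION_DIM plus the 1-based
    # "connection_cell_id" (MODFLOW numbering).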
    connection_vars = [
        "bed_leak",
        "top_elevation",
        "bottom_elevation",
        "connection_length",
        "connection_width",
    ]

    # This will create a statically sized array of strings (dtype="<U10")
    claktype_string = np.array(
        [
            "horizontal",  # 0
            "vertical",  # 1
            "embeddedh",  # 2
            "embeddedv",  # 3
        ]
    )

    connection_data = defaultdict(list)
    cell_ids = []
    for lake in lakes:
        notnull = lake["connection_type"].notnull()
        indices = np.argwhere(notnull.values)
        xr_indices = {
            dim: xr.DataArray(index, dims=("cell_id",))
            for dim, index in zip(notnull.dims, indices.T)
        }

        # There should be no nodata values in connection_type, so we can use it to index.
        type_numeric = (
            lake.dataset["connection_type"].isel(**xr_indices).astype(int).values
        )
        type_string = claktype_string[type_numeric]
        connection_data["connection_type"].append(type_string)

        selection = lake.dataset[connection_vars].isel(**xr_indices)
        for var, da in selection.items():
            if not var.startswith("connection_"):
                var = f"connection_{var}"
            connection_data[var].append(da.values)

        # Offset by one since MODFLOW is 1-based!
        cell_id = xr.DataArray(
            data=indices + 1,
            coords={"celldim": list(xr_indices.keys())},
            dims=(CONNECTION_DIM, "celldim"),
        )
        cell_ids.append(cell_id)

    connection_data = {
        k: xr.DataArray(data=np.concatenate(v), dims=[CONNECTION_DIM])
        for k, v in connection_data.items()
    }

    connection_data["connection_cell_id"] = xr.concat(cell_ids, dim=CONNECTION_DIM)
    return connection_data


def create_outlet_data(outlets, name_to_number):
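    # Converts outlet lake names to 1-based lake numbers and collects the
    # outlet variables into 1-D DataArrays along OUTLET_DIM; an empty lakeout
    # name maps to lake number 0 (an external boundary).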
    outlet_vars = [
        "invert",
        "roughness",
        "width",
        "slope",
    ]
    outlet_data = defaultdict(list)
    for outlet in outlets:
        outlet_data["outlet_couttype"].append(outlet._couttype)

        # Convert names to numbers
        for var in ("lakein", "lakeout"):
            name = outlet.dataset[var].item()
            if (
                var == "lakeout" and name == ""
            ):  # the outlet lakeout can be outside of the model
                lake_number = 0
            else:
                try:
                    lake_number = name_to_number[name]
                except KeyError:
                    names = ", ".join(name_to_number.keys())
                    raise KeyError(
                        f"Outlet lake name {name} not found among lake names: {names}"
                    )
            outlet_data[f"outlet_{var}"].append(lake_number)

        # For other values: fill in NaN if not applicable.
        for var in outlet_vars:
            if var in outlet.dataset:
                if "time" in outlet.dataset[var].dims:
                    value = 0.0
                else:
                    value = outlet.dataset[var].item()
                    if value is None:
                        value = np.nan
            else:
                value = np.nan
            outlet_data[f"outlet_{var}"].append(value)

    outlet_data = {
        k: xr.DataArray(data=v, dims=[OUTLET_DIM]) for k, v in outlet_data.items()
    }
    return outlet_data


def concatenate_timeseries(list_of_lakes_or_outlets, timeseries_name):
332 """
333 In this function we create a dataarray with a given time coordinate axis. We
334 add all the timeseries of lakes or outlets with the given name. We also
335 create a dimension to specify the lake or outlet number.
336 """
    if list_of_lakes_or_outlets is None:
        return None

    list_of_dataarrays = []
    list_of_indices = []
    for index, lake_or_outlet in enumerate(list_of_lakes_or_outlets):
        if timeseries_name in lake_or_outlet.dataset:
            da = lake_or_outlet.dataset[timeseries_name]
            if "time" in da.coords:
                list_of_dataarrays.append(da)
                list_of_indices.append(index + 1)
    if len(list_of_dataarrays) == 0:
        return None
    fill_value = np.nan
    if not pd.api.types.is_numeric_dtype(list_of_dataarrays[0].dtype):
        fill_value = ""
    out = xr.concat(
        list_of_dataarrays, join="outer", dim="index", fill_value=fill_value
    )
    out = out.assign_coords(index=list_of_indices)
    return out


def join_lake_tables(lake_numbers, lakes):
    """
    Merges all the lake tables into a single dataarray. The lake number
    corresponding to each table is added in a new dimension.
    """
    nr_lakes = len(lakes)

    any_lake_table = any([not da_is_none(lake["lake_table"]) for lake in lakes])
    if not any_lake_table:
        return None

    lake_tables = []
    for i in range(nr_lakes):
        if not da_is_none(lakes[i]["lake_table"]):
            lake_number = lake_numbers[i]
            lakes[i]["lake_table"] = lakes[i]["lake_table"].expand_dims(
                dim={"laketable_lake_nr": [lake_number]}
            )
            lake_tables.append(lakes[i]["lake_table"])

    result = xr.merge(lake_tables, compat="no_conflicts")
    return result["lake_table"]


def da_is_none(array):
    """
    When the None value is appended as a dataarray, it gets converted to a
    zero-dimensional array containing None.
    """
    return array is None or array.values[()] is None


class Lake(BoundaryCondition):
    """
    Lake (LAK) Package.

    Parameters
    ----------
    lake_number: array of integers (xr.DataArray) - dimension: number of lakes
        integer used as identifier for the lake.
    lake_starting_stage: array of floats (xr.DataArray) - dimension: number of lakes
        starting lake stage.
    lake_boundname: array of strings (xr.DataArray) - dimension: number of lakes
        name of the lake.

    connection_lake_number: array of integers (xr.DataArray) - dimension: number of connections
        lake number for the current lake-to-aquifer connection.
    connection_cell_id: array of integers (xr.DataArray) - dimension: number of connections
    connection_type: array of strings (xr.DataArray) - dimension: number of connections
        indicates if the connection is horizontal, vertical, embeddedH or
        embeddedV.
    connection_bed_leak: array of floats (xr.DataArray) - dimension: number of connections
        defines the bed leakance for the lake-GWF connection. BEDLEAK must be
        greater than or equal to zero or specified to be np.nan. If BEDLEAK is
        specified to be np.nan, the lake-GWF connection conductance is solely
        a function of aquifer properties in the connected GWF cell and lakebed
        sediments are assumed to be absent.
    connection_bottom_elevation: array of floats (xr.DataArray, optional) - dimension: number of connections
        defines the bottom elevation for a horizontal lake-GWF connection. If
        not provided, will be set to the bottom elevation of the cell it is
        connected to.
    connection_top_elevation: array of floats (xr.DataArray, optional) - dimension: number of connections
        defines the top elevation for a horizontal lake-GWF connection. If not
        provided, will be set to the top elevation of the cell it is connected
        to.
    connection_width: array of floats (xr.DataArray, optional)
        defines the connection face width for a horizontal lake-GWF
        connection. connwidth must be greater than zero for a horizontal
        lake-GWF connection. Any value can be specified if claktype is
        vertical, embeddedh, or embeddedv. If not set, will be set to dx or dy.
    connection_length: array of floats (xr.DataArray, optional)
        defines the distance between the connected GWF cellid node and the
        lake for a horizontal, embeddedh, or embeddedv lake-GWF connection.
        connlen must be greater than zero for a horizontal, embeddedh, or
        embeddedv lake-GWF connection. Any value can be specified if claktype
        is vertical. If not set, will be set to dx or dy.

    outlet_lakein: array of integers (xr.DataArray, optional)
        integer defining the lake number that the outlet is connected to. Must
        be greater than zero.
    outlet_lakeout: array of integers (xr.DataArray, optional)
        integer value that defines the lake number that outlet discharge from
        lake outlet OUTLETNO is routed to. Must be greater than or equal to
        zero. If zero, outlet discharge from lake outlet OUTLETNO is
        discharged to an external boundary.
    outlet_couttype: array of strings (xr.DataArray, optional)
        character string that defines the outlet type for the outlet OUTLETNO.
        Possible strings include: SPECIFIED - character keyword to indicate
        the outlet is defined as a specified flow. MANNING - character keyword
        to indicate the outlet is defined using Manning's equation. WEIR -
        character keyword to indicate the outlet is defined using a sharp weir
        equation.
    outlet_invert: array of floats (xr.DataArray, optional)
        float or character value that defines the invert elevation for the
        lake outlet. A specified INVERT value is only used for active lakes if
        outlet_type for lake outlet OUTLETNO is not SPECIFIED.
    outlet_roughness: array of floats (xr.DataArray, optional)
        defines the roughness coefficient for the lake outlet. Any value can
        be specified if outlet_type is not MANNING.
    outlet_width: array of floats (xr.DataArray, optional)
        float or character value that defines the width of the lake outlet. A
        specified WIDTH value is only used for active lakes if outlet_type for
        lake outlet OUTLETNO is not SPECIFIED.
    outlet_slope: array of floats (xr.DataArray, optional)
        float or character value that defines the bed slope for the lake
        outlet. A specified SLOPE value is only used for active lakes if
        outlet_type for lake outlet OUTLETNO is MANNING.

    # time series (lake)
    ts_status: array of strings (xr.DataArray, optional)
        time series used to indicate lake status. Can be ACTIVE, INACTIVE, or
        CONSTANT. By default, STATUS is ACTIVE.
    ts_stage: array of floats (xr.DataArray, optional)
        time series used to specify the stage of the lake. The specified STAGE
        is only applied if the lake is a constant stage lake.
    ts_rainfall: array of floats (xr.DataArray, optional)
        time series used to specify the rainfall rate (LT-1) for the lake.
        Value must be greater than or equal to zero.
    ts_evaporation: array of floats (xr.DataArray, optional)
        time series used to specify the maximum evaporation rate (LT-1) for
        the lake. Value must be greater than or equal to zero.
    ts_runoff: array of floats (xr.DataArray, optional)
        time series used to specify the runoff rate (L3 T-1) for the lake.
        Value must be greater than or equal to zero.
    ts_inflow: array of floats (xr.DataArray, optional)
        time series used to specify the volumetric inflow rate (L3 T-1) for
        the lake. Value must be greater than or equal to zero.
    ts_withdrawal: array of floats (xr.DataArray, optional)
        time series used to specify the maximum withdrawal rate (L3 T-1) for
        the lake. Value must be greater than or equal to zero.
    ts_auxiliary: array of floats (xr.DataArray, optional)
        time series used to specify values of auxiliary variables of the lake.

    # time series (outlet)
    ts_rate: array of floats (xr.DataArray, optional)
        time series used to specify the extraction rate for the lake outflow.
        A positive value indicates inflow and a negative value indicates
        outflow from the lake. RATE only applies to active (IBOUND > 0) lakes.
        A specified RATE is only applied if COUTTYPE for the OUTLETNO is
        SPECIFIED.
    ts_invert: array of floats (xr.DataArray, optional)
        time series used to specify the invert elevation for the lake outlet.
        A specified INVERT value is only used for active lakes if COUTTYPE for
        lake outlet OUTLETNO is not SPECIFIED.
    ts_rough: array of floats (xr.DataArray, optional)
        time series used to specify the roughness coefficient for the lake
        outlet. Any value can be specified if COUTTYPE is not MANNING.
    ts_width: array of floats (xr.DataArray, optional)
        time series used to specify the width of the lake outlet. A specified
        WIDTH value is only used for active lakes if COUTTYPE for lake outlet
        OUTLETNO is not SPECIFIED.
    ts_slope: array of floats (xr.DataArray, optional)
        time series used to specify the bed slope for the lake outlet. A
        specified SLOPE value is only used for active lakes if COUTTYPE for
        lake outlet OUTLETNO is MANNING.

    lake_tables: array of floats (xr.DataArray, optional)
        the array contains the lake tables, for those lakes that have them.

    print_input: ({True, False}, optional)
        keyword to indicate that the list of constant head information will be
        written to the listing file immediately after it is read. Default is
        False.
    print_stage: ({True, False}, optional)
        Keyword to indicate that the list of lake stages will be printed to
        the listing file for every stress period in which "HEAD PRINT" is
        specified in Output Control. If there is no Output Control option and
        PRINT_STAGE is specified, then stages are printed for the last time
        step of each stress period.
    print_flows: ({True, False}, optional)
        Indicates that the list of constant head flow rates will be printed to
        the listing file for every stress period time step in which "BUDGET
        PRINT" is specified in Output Control. If there is no Output Control
        option and PRINT FLOWS is specified, then flow rates are printed for
        the last time step of each stress period. Default is False.
    save_flows: ({True, False}, optional)
        Indicates that constant head flow terms will be written to the file
        specified with "BUDGET FILEOUT" in Output Control. Default is False.

    stagefile: (String, optional)
        name of the binary output file to write stage information.
    budgetfile: (String, optional)
        name of the binary output file to write budget information.
    budgetcsvfile: (String, optional)
        name of the comma-separated value (CSV) output file to write budget
        summary information. A budget summary record will be written to this
        file for each time step of the simulation.
    package_convergence_filename: (String, optional)
        name of the comma-separated values output file to write package
        convergence information.
    ts6_filename: String, optional
        defines a time-series file defining time series that can be used to
        assign time-varying values. See the "Time-Variable Input" section for
        instructions on using the time-series capability.
    time_conversion: float
        value that is used in converting outlet flow terms that use Manning's
        equation or gravitational acceleration to consistent time units.
        TIME_CONVERSION should be set to 1.0, 60.0, 3,600.0, 86,400.0, and
        31,557,600.0 when using time units (TIME_UNITS) of seconds, minutes,
        hours, days, or years in the simulation, respectively. CONVTIME does
        not need to be specified if no lake outlets are specified or
        TIME_UNITS are seconds.
    length_conversion: float
        float value that is used in converting outlet flow terms that use
        Manning's equation or gravitational acceleration to consistent length
        units. LENGTH_CONVERSION should be set to 3.28081, 1.0, and 100.0 when
        using length units (LENGTH_UNITS) of feet, meters, or centimeters in
        the simulation, respectively. LENGTH_CONVERSION does not need to be
        specified if no lake outlets are specified or LENGTH_UNITS are meters.
    validate: {True, False}
        Flag to indicate whether the package should be validated upon
        initialization. This raises a ValidationError if package input is
        provided in the wrong manner. Defaults to True.
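
    Examples
    --------
    This package is typically created from LakeData and outlet objects (a
    sketch; ``lake1``, ``lake2``, and the outlet argument values are
    hypothetical)::

        >>> outlet = OutletManning("first_lake", "second_lake", 1.0, 2.0, 3.0, 4.0)
        >>> lake_package = Lake.from_lakes_and_outlets(
        ...     lakes=[lake1, lake2],
        ...     outlets=[outlet],
        ...     time_conversion=86_400.0,  # simulation time unit: days
        ...     length_conversion=1.0,  # simulation length unit: meters
        ... )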
553 """

    _pkg_id = "lak"
    _template = Package._initialize_template(_pkg_id)

    _period_data_lakes = (
        "ts_status",
        "ts_stage",
        "ts_rainfall",
        "ts_evaporation",
        "ts_runoff",
        "ts_inflow",
        "ts_withdrawal",
        "ts_auxiliary",
    )
    _period_data_outlets = (
        "ts_rate",
        "ts_invert",
        "ts_rough",
        "ts_width",
        "ts_slope",
    )

    _period_data = _period_data_lakes + _period_data_outlets

    _init_schemata = {
        "lake_number": [DTypeSchema(np.integer), DimsSchema(LAKE_DIM)],
        "lake_starting_stage": [DTypeSchema(np.floating), DimsSchema(LAKE_DIM)],
        "lake_boundname": [DTypeSchema(str), DimsSchema(LAKE_DIM)],
        "connection_lake_number": [DTypeSchema(np.integer), DimsSchema(CONNECTION_DIM)],
        "connection_type": [DTypeSchema(str), DimsSchema(CONNECTION_DIM)],
        "connection_bed_leak": [DTypeSchema(np.floating), DimsSchema(CONNECTION_DIM)],
        "connection_bottom_elevation": [
            DTypeSchema(np.floating),
            DimsSchema(CONNECTION_DIM),
        ],
        "connection_top_elevation": [
            DTypeSchema(np.floating),
            DimsSchema(CONNECTION_DIM),
        ],
        "connection_width": [DTypeSchema(np.floating), DimsSchema(CONNECTION_DIM)],
        "connection_length": [DTypeSchema(np.floating), DimsSchema(CONNECTION_DIM)],
        "outlet_lakein": [
            DTypeSchema(np.integer),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "outlet_lakeout": [
            DTypeSchema(np.integer),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "outlet_couttype": [DTypeSchema(str), DimsSchema(OUTLET_DIM) | DimsSchema()],
        "outlet_invert": [
            DTypeSchema(np.floating),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "outlet_roughness": [
            DTypeSchema(np.floating),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "outlet_width": [
            DTypeSchema(np.floating),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "outlet_slope": [
            DTypeSchema(np.floating),
            DimsSchema(OUTLET_DIM) | DimsSchema(),
        ],
        "ts_status": [DTypeSchema(str), DimsSchema("index", "time") | DimsSchema()],
        "ts_stage": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_rainfall": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_evaporation": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_runoff": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_inflow": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_withdrawal": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_auxiliary": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_rate": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_invert": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_rough": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_width": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
        "ts_slope": [
            DTypeSchema(np.floating),
            DimsSchema("index", "time") | DimsSchema(),
        ],
    }

    _write_schemata = {
        "lake_number": [AllValueSchema(">", 0)],
        "connection_lake_number": [AllValueSchema(">", 0)],
        "connection_cell_id": [AllValueSchema(">", 0)],
        "connection_width": [AllValueSchema(">", 0)],
        "connection_length": [AllValueSchema(">", 0)],
        "outlet_lakein": [AllValueSchema(">", 0)],
        "outlet_lakeout": [AllValueSchema(">", 0)],
        "outlet_width": [AllValueSchema(">", 0)],
    }

    @init_log_decorator()
    def __init__(
        # lake
        self,
        lake_number,
        lake_starting_stage,
        lake_boundname,
        # connection
        connection_lake_number,
        connection_cell_id,
        connection_type,
        connection_bed_leak,
        connection_bottom_elevation,
        connection_top_elevation,
        connection_width,
        connection_length,
        # outlet
        outlet_lakein=None,
        outlet_lakeout=None,
        outlet_couttype=None,
        outlet_invert=None,
        outlet_roughness=None,
        outlet_width=None,
        outlet_slope=None,
        # time series (lake)
        ts_status=None,
        ts_stage=None,
        ts_rainfall=None,
        ts_evaporation=None,
        ts_runoff=None,
        ts_inflow=None,
        ts_withdrawal=None,
        ts_auxiliary=None,
        # time series (outlet)
        ts_rate=None,
        ts_invert=None,
        ts_rough=None,
        ts_width=None,
        ts_slope=None,
        # lake tables
        lake_tables=None,
        # options
        print_input=False,
        print_stage=False,
        print_flows=False,
        save_flows=False,
        stagefile=None,
        budgetfile=None,
        budgetcsvfile=None,
        package_convergence_filename=None,
        ts6_filename=None,
        time_conversion=None,
        length_conversion=None,
        validate=True,
    ):
        if ts_status is not None:
            ts_status = self._convert_to_string_dataarray(xr.DataArray(ts_status))

        dict_dataset = {
            "lake_boundname": lake_boundname,
            "lake_number": lake_number,
            "lake_starting_stage": lake_starting_stage,
            "connection_lake_number": connection_lake_number,
            "connection_cell_id": connection_cell_id,
            "connection_type": connection_type,
            "connection_bed_leak": connection_bed_leak,
            "connection_bottom_elevation": connection_bottom_elevation,
            "connection_top_elevation": connection_top_elevation,
            "connection_width": connection_width,
            "connection_length": connection_length,
            "outlet_lakein": outlet_lakein,
            "outlet_lakeout": outlet_lakeout,
            "outlet_couttype": outlet_couttype,
            "outlet_invert": outlet_invert,
            "outlet_roughness": outlet_roughness,
            "outlet_width": outlet_width,
            "outlet_slope": outlet_slope,
            "print_input": print_input,
            "print_stage": print_stage,
            "print_flows": print_flows,
            "save_flows": save_flows,
            "stagefile": stagefile,
            "budgetfile": budgetfile,
            "budgetcsvfile": budgetcsvfile,
            "package_convergence_filename": package_convergence_filename,
            "ts6_filename": ts6_filename,
            "time_conversion": time_conversion,
            "length_conversion": length_conversion,
            "ts_status": ts_status,
            "ts_stage": ts_stage,
            "ts_rainfall": ts_rainfall,
            "ts_evaporation": ts_evaporation,
            "ts_runoff": ts_runoff,
            "ts_inflow": ts_inflow,
            "ts_withdrawal": ts_withdrawal,
            "ts_auxiliary": ts_auxiliary,
            "ts_rate": ts_rate,
            "ts_invert": ts_invert,
            "ts_rough": ts_rough,
            "ts_width": ts_width,
            "ts_slope": ts_slope,
            "lake_tables": lake_tables,
        }

        super().__init__(dict_dataset)

        self._validate_init_schemata(validate)

    @staticmethod
    def from_lakes_and_outlets(
        lakes,
        outlets=None,
        print_input=False,
        print_stage=False,
        print_flows=False,
        save_flows=False,
        stagefile=None,
        budgetfile=None,
        budgetcsvfile=None,
        package_convergence_filename=None,
        time_conversion=None,
        length_conversion=None,
    ):
        package_content = {}
        name_to_number = {
            lake["boundname"].item(): i + 1 for i, lake in enumerate(lakes)
        }

        # Package data
        lake_numbers = list(name_to_number.values())
        n_connection = [lake["connection_type"].count().values[()] for lake in lakes]
        package_content["lake_starting_stage"] = xr.DataArray(
            data=[lake["starting_stage"].item() for lake in lakes],
            dims=[LAKE_DIM],
        )
        package_content["lake_number"] = xr.DataArray(
            data=lake_numbers, dims=[LAKE_DIM]
        )
        package_content["lake_boundname"] = xr.DataArray(
            list(name_to_number.keys()), dims=[LAKE_DIM]
        )

        # Connection data
        package_content["connection_lake_number"] = xr.DataArray(
            data=np.repeat(lake_numbers, n_connection),
            dims=[CONNECTION_DIM],
        )
        connection_data = create_connection_data(lakes)
        package_content.update(connection_data)

        for ts_name in Lake._period_data_lakes:
            shortname = ts_name[3:]
            package_content[ts_name] = concatenate_timeseries(lakes, shortname)

        for ts_name in Lake._period_data_outlets:
            shortname = ts_name[3:]
            package_content[ts_name] = concatenate_timeseries(outlets, shortname)

        # Align timeseries variables
        ts_vars = Lake._period_data_lakes + Lake._period_data_outlets
        assigned_ts_vars = [
            ts_var for ts_var in ts_vars if package_content[ts_var] is not None
        ]
        aligned_timeseries = xr.align(
            *(package_content[ts_var] for ts_var in assigned_ts_vars), join="outer"
        )
        package_content.update(
            {ts_var: ts for ts_var, ts in zip(assigned_ts_vars, aligned_timeseries)}
        )

        if outlets is not None:
            outlet_data = create_outlet_data(outlets, name_to_number)
            package_content.update(outlet_data)
        package_content["print_input"] = print_input
        package_content["print_stage"] = print_stage
        package_content["print_flows"] = print_flows
        package_content["save_flows"] = save_flows
        package_content["stagefile"] = stagefile
        package_content["budgetfile"] = budgetfile
        package_content["budgetcsvfile"] = budgetcsvfile
        package_content["package_convergence_filename"] = package_convergence_filename
        package_content["time_conversion"] = time_conversion
        package_content["length_conversion"] = length_conversion
        package_content["lake_tables"] = join_lake_tables(lake_numbers, lakes)
        return mf6.Lake(**package_content)

    def _has_outlets(self):
        # item() will not work here if the object is an array.
        # .values[()] will simply return the full numpy array.
        outlet_lakein = self.dataset["outlet_lakein"].values[()]
        if outlet_lakein is None or (
            np.isscalar(outlet_lakein) and np.isnan(outlet_lakein)
        ):
            return False
        return True

    def _has_laketables(self):
        # item() will not work here if the object is an array.
        # .values[()] will simply return the full numpy array.
        tables = self.dataset["lake_tables"].values[()]
        if tables is None:
            return False
        if any([pd.api.types.is_numeric_dtype(t) for t in tables]):
            return True
        return False

    def _has_timeseries(self):
        for name in self._period_data:
            if "time" in self.dataset[name].coords:
                if len(self.dataset[name].coords["time"]) > 0:
                    return True
        return False

    @classmethod
    def _convert_to_string_dataarray(cls, x: xr.DataArray) -> xr.DataArray:
        # When adding a string dataarray to a dataset with more coordinates,
        # the values for coordinates in the dataset that are not present in
        # the dataarray are set to NaN, and the dataarray dtype changes to
        # object (because it now holds both strings and NaNs). This function
        # can be used to convert such a dataarray back to string type. To
        # detect NaNs we cannot use np.isnan, because that works only on
        # numeric types; instead we use the property that any equality check
        # is False for NaN.
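        # For example: np.nan != np.nan evaluates to True, whereas
        # "active" != "active" evaluates to False.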
        idx = np.where(x != x)
        x[idx[0][:]] = ""
        return x.astype(str)

    def render(self, directory, pkgname, globaltimes, binary):
        d = {}
        for var in (
            "print_input",
            "print_stage",
            "print_flows",
            "save_flows",
            "stagefile",
            "budgetfile",
            "budgetcsvfile",
            "package_convergence_filename",
            "ts6_filename",
            "time_conversion",
            "length_conversion",
        ):
            value = self[var].item()
            if self._valid(value):
                d[var] = value

        d["nlakes"] = len(self.dataset["lake_number"])
        d["noutlets"] = 0
        if self._has_outlets():
            d["noutlets"] = len(self.dataset["outlet_lakein"])

        d["ntables"] = 0
        if self._has_laketables():
            d["ntables"] = len(self.dataset["lake_tables"].coords["laketable_lake_nr"])

        packagedata = []
        for name, number, stage in zip(
            self.dataset["lake_boundname"],
            self.dataset["lake_number"],
            self.dataset["lake_starting_stage"],
        ):
            nconn = (self.dataset["connection_lake_number"] == number).sum()
            row = tuple(a.item() for a in (number, stage, nconn, name))
            packagedata.append(row)
        d["packagedata"] = packagedata

        return self._template.render(d)

    def _get_iconn(self, lake_numbers_per_connection):
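        # Computes, for each connection, its 1-based ordinal among the
        # connections of the same lake. For example (hypothetical input):
        # lake_numbers_per_connection = [1, 1, 2, 1] -> iconn = [1, 2, 1, 3].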
        iconn = np.full_like(lake_numbers_per_connection, dtype=np.int64, fill_value=0)
        maxlake = lake_numbers_per_connection.max()
        connections_per_lake = np.zeros(maxlake + 1)
        for i in range(np.size(lake_numbers_per_connection)):
            lakeno = lake_numbers_per_connection[i]
            connections_per_lake[lakeno] += 1
            iconn[i] = connections_per_lake[lakeno]
        return iconn

    def _connection_dataframe(self) -> pd.DataFrame:
        connection_vars = [
            "connection_lake_number",
            "connection_type",
            "connection_bed_leak",
            "connection_bottom_elevation",
            "connection_top_elevation",
            "connection_width",
            "connection_length",
        ]
        data_df = self.dataset[connection_vars].to_dataframe()
        lake_numbers_per_connection = self.dataset["connection_lake_number"].values

        data_df["iconn"] = self._get_iconn(lake_numbers_per_connection)
        cell_id_df = self.dataset["connection_cell_id"].to_pandas()
        order = (
            ["connection_lake_number", "iconn"]
            + list(cell_id_df.columns)
            + connection_vars[1:]
        )
        return pd.concat([data_df, cell_id_df], axis=1)[order]

    def _outlet_dataframe(self) -> pd.DataFrame:
        outlet_vars = [
            "outlet_lakein",
            "outlet_lakeout",
            "outlet_couttype",
            "outlet_invert",
            "outlet_roughness",
            "outlet_width",
            "outlet_slope",
        ]
        df = self.dataset[outlet_vars].to_dataframe()
        return df

    def write_blockfile(self, pkgname, globaltimes, write_context: WriteContext):
        renderdir = pathlib.Path(write_context.write_directory.stem)
        content = self.render(
            directory=renderdir,
            pkgname=pkgname,
            globaltimes=globaltimes,
            binary=write_context.use_binary,
        )
        filename = write_context.write_directory / f"{pkgname}.{self._pkg_id}"
        with open(filename, "w") as f:
            f.write(content)
            f.write("\n")

            self._write_table_section(
                f,
                self._connection_dataframe(),
                "connectiondata",
            )
            if self._has_laketables():
                lake_number_to_filename = self._write_laketable_filelist_section(f)
                self._write_laketable_files(
                    write_context.write_directory, lake_number_to_filename
                )

            if self._has_outlets():
                f.write("\n")
                self._write_table_section(f, self._outlet_dataframe(), "outlets")

            if self._has_timeseries():
                self._write_period_section(f, globaltimes)

        return

    def _write_period_section(self, f, globaltimes):
        class Period_internal:
            """
            The Period_internal class is used for rendering the lake package
            in jinja. There is no point in instantiating this class as a user.
            """

            def __init__(self, period_number):
                self.period_number = period_number
                self.nr_values = 0
                self.lake_or_outlet_number = []
                self.series_name = []
                self.value = []

        period_data_list = []

        period_data_name_list = [tssname for tssname in self._period_data]
        timeseries_dataset = self.dataset[period_data_name_list]
        timeseries_times = self.dataset.coords["time"]
        iperiods = np.searchsorted(globaltimes, timeseries_times) + 1
        for iperiod, (_, period_data) in zip(
            iperiods, timeseries_dataset.groupby("time")
        ):
            period_data_list.append(Period_internal(iperiod))
            for tssname in self._period_data:
                if len(period_data[tssname].dims) > 0:
                    for index in period_data.coords["index"].values:
                        value = period_data[tssname].sel(index=index).values[()]
                        isvalid = False
                        if isinstance(value, str):
                            isvalid = value != ""
                        else:
                            isvalid = ~np.isnan(value)

                        if isvalid:
                            period_data_list[-1].nr_values += 1
                            period_data_list[-1].lake_or_outlet_number.append(index)
                            period_data_list[-1].series_name.append(tssname[3:])
                            period_data_list[-1].value.append(value)

        _template = jinja2.Template(
            textwrap.dedent(
                """
                {% if nperiod > 0 -%}
                {% for iperiod in range(0, nperiod) %}
                {% if periods[iperiod].nr_values > 0 -%}
                begin period {{periods[iperiod].period_number}}{% for ivalue in range(0, periods[iperiod].nr_values) %}
                  {{periods[iperiod].lake_or_outlet_number[ivalue]}} {{periods[iperiod].series_name[ivalue]}} {{periods[iperiod].value[ivalue]}}{% endfor %}
                end period
                {% endif -%}
                {% endfor -%}
                {% endif -%}"""
            )
        )
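
        # The rendered block looks like (hypothetical values):
        #
        #   begin period 2
        #     1 stage 15.0
        #   end period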

        d = {}
        d["nperiod"] = len(period_data_list)
        d["periods"] = period_data_list

        period_block = _template.render(d)
        f.write(period_block)

    def _package_data_to_sparse(self):
        return

    def fill_stress_perioddata(self):
        # this function is called from packagebase and should do nothing in this context
        return

    def _write_perioddata(self, directory, pkgname, binary):
        # this function is called from packagebase and should do nothing in this context
        return

    def _write_laketable_filelist_section(
        self,
        f,
    ):
        """
        Writes a section of the lake package input file which lists the
        referenced laketable files. Returns a dictionary with as key the lake
        number and as value the laketable filename, for those lakes that have
        a laketable.
        """
        template = jinja2.Template(
            textwrap.dedent(
                """\n\
                begin tables{% for lakenumber, lakefile in file_dict.items() %}
                  {{lakenumber}} TAB6 FILEIN {{lakefile}}{% endfor %}
                end tables\n
                """
            )
        )

        lake_number_to_lake_table_filename = {}
        d = {}

        for name, number in zip(
            self.dataset["lake_boundname"],
            self.dataset["lake_number"],
        ):
            lake_number = number.values[()]
            lake_name = name.values[()]

            if (
                lake_number
                in self.dataset["lake_tables"].coords["laketable_lake_nr"].values
            ):
                table_file = lake_name + ".ltbl"
                lake_number_to_lake_table_filename[lake_number] = table_file

        d["file_dict"] = lake_number_to_lake_table_filename
        tables_block = template.render(d)
        f.write(tables_block)

        return lake_number_to_lake_table_filename

    def _write_laketable_files(self, directory, lake_number_to_filename):
        """
        Writes a laketable file, containing a table which specifies the
        relation between stage, volume, and area for one single lake.
        """
        template = Package._initialize_template("laketable")

        for num, file in lake_number_to_filename.items():
            d = {}
            table = self.dataset["lake_tables"].sel(
                {
                    "laketable_lake_nr": num,
                }
            )

            # count number of rows
            stage_col = table.sel({"column": "stage"})
            d["nrow"] = (
                stage_col.where(pd.api.types.is_numeric_dtype).count().values[()]
            )

            # check if the barea column is present for this table (and not filled with NaNs)
            has_barea_column = "barea" in table.coords["column"]
            if has_barea_column:
                barea_column = table.sel({"column": "barea"})
                has_barea_column = (
                    barea_column.where(pd.api.types.is_numeric_dtype).count().values[()]
                    > 0
                )

            columns = ["stage", "sarea", "volume"]
            if has_barea_column:
                columns.append("barea")
            d["ncol"] = len(columns)
            table_dataframe = pd.DataFrame(table.sel({"column": columns}).transpose())

            string_table = table_dataframe.iloc[
                range(d["nrow"]), range(d["ncol"])
            ].to_csv(
                header=False,
                index=False,
                sep=" ",
                lineterminator="\n",
            )

            d["table"] = string_table
            # write lake table to file
            fullpath_laketable = directory / file
            laketable_file = template.render(d)
            with open(fullpath_laketable, "w") as f:
                f.write(laketable_file)

    def _write_table_section(
        self, f, dataframe: pd.DataFrame, title: str, index: bool = False
    ) -> None:
        """
        Writes a dataframe to file. Used for the connection data and for the
        outlet data.
        """
        f.write(f"begin {title}\n")
        block = dataframe.to_csv(
            index=index,
            header=False,
            sep=" ",
            lineterminator="\n",
        )
        trimmedlines = [line.strip() for line in block.splitlines()]
        trimmedblock = "\n".join(map(str, trimmedlines)) + "\n"
        f.write(trimmedblock)
        f.write(f"end {title}\n")
        return

    def is_splitting_supported(self) -> bool:
        return False

    def is_regridding_supported(self) -> bool:
        return False

    def is_clipping_supported(self) -> bool:
        return False