2021-03-08 11:20:46 +00:00
|
|
|
import re
|
2022-11-17 13:14:49 +00:00
|
|
|
import typing as t
|
2021-02-25 08:09:14 +00:00
|
|
|
|
2019-03-24 21:34:50 +00:00
|
|
|
import yaml
|
2021-02-25 08:09:14 +00:00
|
|
|
from _io import TextIOWrapper
|
2019-07-10 07:00:44 +00:00
|
|
|
from yaml.parser import ParserError
|
2019-07-11 02:31:23 +00:00
|
|
|
from yaml.scanner import ScannerError
|
2019-03-24 21:34:50 +00:00
|
|
|
|
2019-05-05 09:45:24 +00:00
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def load(stream: t.Union[str, t.IO[str]]) -> t.Any:
    """
    Safely deserialize a single yaml document from a string or file object.
    """
    # safe_load is shorthand for load(..., Loader=yaml.SafeLoader).
    return yaml.safe_load(stream)
|
2019-03-24 21:34:50 +00:00
|
|
|
|
2019-05-05 09:45:24 +00:00
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def load_all(stream: str) -> t.Iterator[t.Any]:
    """
    Safely deserialize every yaml document contained in ``stream``.

    Returns a lazy iterator over the parsed documents.
    """
    # safe_load_all is shorthand for load_all(..., Loader=yaml.SafeLoader).
    return yaml.safe_load_all(stream)
|
|
|
|
|
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def dump(content: t.Any, fileobj: t.IO[str]) -> None:
    """
    Serialize ``content`` as yaml and write it to ``fileobj``.

    The annotation was widened from the private ``_io.TextIOWrapper`` type to
    the public ``t.IO[str]``: yaml.dump only needs a writable text stream, so
    this also covers io.StringIO and other file-like objects.

    Block style (``default_flow_style=False``) is used for readability.
    """
    yaml.dump(content, stream=fileobj, default_flow_style=False)
|
|
|
|
|
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def dumps(content: t.Any) -> str:
    """
    Serialize ``content`` to a yaml-formatted string (block style).
    """
    serialized = yaml.dump(content, default_flow_style=False)
    # yaml.dump returns str when no stream is given; assert narrows the type.
    assert isinstance(serialized, str)
    return serialized
|
|
|
|
|
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def parse(v: t.Union[str, t.IO[str]]) -> t.Any:
    """
    Parse a yaml-formatted string.

    If ``v`` is not valid yaml, it is returned unchanged instead of raising.
    """
    try:
        return load(v)
    except (ParserError, ScannerError):
        # Not parseable as yaml: hand the value back as-is.
        return v
|
2021-03-08 11:20:46 +00:00
|
|
|
|
|
|
|
|
2022-11-17 13:14:49 +00:00
|
|
|
def parse_key_value(text: str) -> t.Optional[t.Tuple[str, t.Any]]:
    """
    Parse <KEY>=<YAML VALUE> command line arguments.

    Return None if text could not be parsed.
    """
    pattern = re.compile(r"(?P<key>[a-zA-Z0-9_-]+)=(?P<value>(.|\n|\r)*)")
    match = pattern.match(text)
    if match is None:
        return None
    key = match.group("key")
    value = match.group("value")
    if not value:
        # A bare "KEY=" would otherwise be loaded as a null value; quote it so
        # it stays an empty string.
        value = "''"
    return key, parse(value)
|