Improve job running in local and k8s
Running jobs was previously done with "exec". This was because it
allowed us to avoid copying too much container specification information
from the docker-compose/deployments files to the jobs files. However,
this was limiting:
- In order to run a job, the corresponding container had to be running.
This was particularly painful in Kubernetes, where containers are
crashing as long as migrations are not correctly run.
- Containers in which we need to run jobs needed to be present in the
docker-compose/deployments files. This is unnecessary, for example when
mysql is disabled, or in the case of the certbot container.
Now, we create dedicated jobs files, both for local and k8s deployment.
This introduces a little redundancy, but not too much. Note that
dependent containers are not listed in the docker-compose.jobs.yml file,
so an actual platform is still supposed to be running when we launch the
jobs.
This also introduces a subtle change: now, jobs go through the container
entrypoint prior to running. This is probably a good thing, as it will
avoid forgetting about incorrect environment variables.
In k8s, we find ourselves interacting way too much with the kubectl
utility. Parsing output from the CLI is a pain. So we need to switch to
the native kubernetes client library.
2020-03-25 17:47:36 +00:00
|
|
|
from datetime import datetime
|
|
|
|
from time import sleep
|
2021-04-06 10:09:00 +00:00
|
|
|
from typing import Any, List, Optional, Type
|
Improve job running in local and k8s
Running jobs was previously done with "exec". This was because it
allowed us to avoid copying too much container specification information
from the docker-compose/deployments files to the jobs files. However,
this was limiting:
- In order to run a job, the corresponding container had to be running.
This was particularly painful in Kubernetes, where containers are
crashing as long as migrations are not correctly run.
- Containers in which we need to run jobs needed to be present in the
docker-compose/deployments files. This is unnecessary, for example when
mysql is disabled, or in the case of the certbot container.
Now, we create dedicated jobs files, both for local and k8s deployment.
This introduces a little redundancy, but not too much. Note that
dependent containers are not listed in the docker-compose.jobs.yml file,
so an actual platform is still supposed to be running when we launch the
jobs.
This also introduces a subtle change: now, jobs go through the container
entrypoint prior to running. This is probably a good thing, as it will
avoid forgetting about incorrect environment variables.
In k8s, we find ourselves interacting way too much with the kubectl
utility. Parsing output from the CLI is a pain. So we need to switch to
the native kubernetes client library.
2020-03-25 17:47:36 +00:00
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
import click
|
|
|
|
|
2019-06-03 22:44:12 +00:00
|
|
|
from .. import config as tutor_config
|
2019-05-11 19:20:09 +00:00
|
|
|
from .. import env as tutor_env
|
2021-11-23 08:25:09 +00:00
|
|
|
from .. import exceptions, fmt, jobs, serialize, utils
|
2021-04-06 10:09:00 +00:00
|
|
|
from ..types import Config, get_typed
|
2021-11-08 13:46:38 +00:00
|
|
|
from .config import save as config_save_command
|
2021-02-25 08:09:14 +00:00
|
|
|
from .context import Context
|
|
|
|
|
|
|
|
|
|
|
|
class K8sClients:
    """
    Lazy, process-wide holder for the kubernetes API client objects.

    The kubernetes package is only imported when the first instance is built,
    and each API client (batch, core) is only instantiated on first access.
    Access the shared object through :meth:`instance`.
    """

    # Shared singleton, populated on first call to instance().
    _instance = None

    def __init__(self) -> None:
        # Loading the kubernetes module here to avoid import overhead
        from kubernetes import client, config  # pylint: disable=import-outside-toplevel

        # Read cluster credentials from the local kubeconfig file.
        config.load_kube_config()
        self._batch_api = None
        self._core_api = None
        self._client = client

    @classmethod
    def instance(cls: Type["K8sClients"]) -> "K8sClients":
        """Return the shared clients object, creating it on first use."""
        existing = cls._instance
        if existing is None:
            existing = cls()
            cls._instance = existing
        return existing

    @property
    def batch_api(self):  # type: ignore
        """Lazily-created BatchV1Api client (jobs API)."""
        api = self._batch_api
        if api is None:
            api = self._client.BatchV1Api()
            self._batch_api = api
        return api

    @property
    def core_api(self):  # type: ignore
        """Lazily-created CoreV1Api client (pods, etc.)."""
        api = self._core_api
        if api is None:
            api = self._client.CoreV1Api()
            self._core_api = api
        return api
|
|
|
|
|
|
|
|
|
|
|
|
class K8sJobRunner(jobs.BaseJobRunner):
    """
    Job runner that executes commands by creating one-shot Kubernetes jobs.

    Job manifests come from the rendered "k8s/jobs.yml" template; they are
    patched (unique name, labels, command) and applied with kubectl/kustomize.
    """

    def load_job(self, name: str) -> Any:
        """
        Return the manifest of the job named `name` from the rendered jobs file.

        Raises:
            exceptions.TutorError: if a job has a non-string name, or if no job
                with the requested name exists.
        """
        all_jobs = self.render("k8s", "jobs.yml")
        for job in serialize.load_all(all_jobs):
            job_name = job["metadata"]["name"]
            if not isinstance(job_name, str):
                raise exceptions.TutorError(
                    "Invalid job name: '{}'. Expected str.".format(job_name)
                )
            if job_name == name:
                return job
        raise exceptions.TutorError("Could not find job '{}'".format(name))

    def active_job_names(self) -> List[str]:
        """
        Return a list of active job names
        Docs:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#list-job-v1-batch
        """
        api = K8sClients.instance().batch_api
        return [
            job.metadata.name
            for job in api.list_namespaced_job(k8s_namespace(self.config)).items
            if job.status.active
        ]

    def run_job(self, service: str, command: str) -> int:
        """
        Run `command` in a dedicated "<service>-job" Kubernetes job and block
        until it completes.

        Returns 0 on success. Raises exceptions.TutorError if the job fails.
        """
        job_name = "{}-job".format(service)
        job = self.load_job(job_name)
        # Create a unique job name to make it deduplicate jobs and make it easier to
        # find later. Logs of older jobs will remain available for some time.
        job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S")

        # Wait until all other jobs are completed
        while True:
            active_jobs = self.active_job_names()
            if not active_jobs:
                break
            fmt.echo_info(
                "Waiting for active jobs to terminate: {}".format(" ".join(active_jobs))
            )
            sleep(5)

        # Configure job
        job["metadata"]["name"] = job_name
        job["metadata"].setdefault("labels", {})
        job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name
        # Define k8s entrypoint/args
        shell_command = ["sh", "-e", "-c"]
        container = job["spec"]["template"]["spec"]["containers"][0]
        if container.get("command") == []:
            # In some cases, we need to bypass the container entrypoint.
            # Unfortunately, AFAIK, there is no way to do so in K8s manifests. So we mark
            # some jobs with "command: []". For these jobs, the entrypoint becomes "sh -e -c".
            # We do not do this for every job, because some (most) entrypoints are actually useful.
            container["command"] = shell_command
            container_args = [command]
        else:
            container_args = shell_command + [command]
        container["args"] = container_args
        job["spec"]["backoffLimit"] = 1
        job["spec"]["ttlSecondsAfterFinished"] = 3600
        # Save patched job to "jobs.yml" file. Use an explicit encoding so the
        # output does not depend on the user's locale.
        with open(
            tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w", encoding="utf-8"
        ) as job_file:
            serialize.dump(job, job_file)
        # We cannot use the k8s API to create the job: configMap and volume names need
        # to be found with the right suffixes.
        utils.kubectl(
            "apply",
            "--kustomize",
            tutor_env.pathjoin(self.root),
            "--selector",
            "app.kubernetes.io/name={}".format(job_name),
        )

        message = (
            "Job {job_name} is running. To view the logs from this job, run:\n\n"
            """    kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """
            """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n"""
            "Waiting for job completion..."
        ).format(job_name=job_name, namespace=k8s_namespace(self.config))
        fmt.echo_info(message)

        # Wait for completion
        field_selector = "metadata.name={}".format(job_name)
        while True:
            namespaced_jobs = K8sClients.instance().batch_api.list_namespaced_job(
                k8s_namespace(self.config), field_selector=field_selector
            )
            if not namespaced_jobs.items:
                # Bug fix: previously this branch re-polled immediately ("continue"
                # without sleeping), busy-looping against the API server until the
                # job appeared. Back off before retrying.
                sleep(5)
                continue
            job = namespaced_jobs.items[0]
            if not job.status.active:
                if job.status.succeeded:
                    fmt.echo_info("Job {} successful.".format(job_name))
                    break
                if job.status.failed:
                    raise exceptions.TutorError(
                        "Job {} failed. View the job logs to debug this issue.".format(
                            job_name
                        )
                    )
            sleep(5)
        return 0
|
2019-01-22 20:25:04 +00:00
|
|
|
|
|
|
|
|
2019-10-08 20:25:31 +00:00
|
|
|
@click.group(help="Run Open edX on Kubernetes")
def k8s() -> None:
    """Parent command group for all `tutor k8s` subcommands."""
|
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2019-05-05 09:45:24 +00:00
|
|
|
@click.command(help="Configure and run Open edX from scratch")
@click.option("-I", "--non-interactive", is_flag=True, help="Run non-interactively")
@click.pass_context
def quickstart(context: click.Context, non_interactive: bool) -> None:
    """
    One-shot bootstrap: configure, render the environment, start the platform,
    run initialisation jobs and print the access URLs.
    """
    # Step 1: (interactively) collect and save configuration values.
    click.echo(fmt.title("Interactive platform configuration"))
    context.invoke(
        config_save_command,
        interactive=(not non_interactive),
        set_vars=[],
        unset_vars=[],
    )
    config = tutor_config.load(context.obj.root)
    # Warn about a setting that users frequently carry over from older releases.
    if not config["ENABLE_WEB_PROXY"]:
        fmt.echo_alert(
            "Potentially invalid configuration: ENABLE_WEB_PROXY=false\n"
            "This setting might have been defined because you previously set WEB_PROXY=true. This is no longer"
            " necessary in order to get Tutor to work on Kubernetes. In Tutor v11+ a Caddy-based load balancer is"
            " provided out of the box to handle SSL/TLS certificate generation at runtime. If you disable this"
            " service, you will have to configure an Ingress resource and a certificate manager yourself to redirect"
            " traffic to the caddy service. See the Kubernetes section in the Tutor documentation for more"
            " information."
        )
    # Step 2: re-render the environment files from the saved configuration.
    click.echo(fmt.title("Updating the current environment"))
    tutor_env.save(context.obj.root, config)
    # Step 3: apply all resources to the cluster.
    click.echo(fmt.title("Starting the platform"))
    context.invoke(start)
    # Step 4: run all initialisation jobs (migrations, etc.).
    click.echo(fmt.title("Database creation and migrations"))
    context.invoke(init, limit=None)
    fmt.echo_info(
        """Your Open edX platform is ready and can be accessed at the following urls:

    {http}://{lms_host}
    {http}://{cms_host}
    """.format(
            http="https" if config["ENABLE_HTTPS"] else "http",
            lms_host=config["LMS_HOST"],
            cms_host=config["CMS_HOST"],
        )
    )
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2021-11-29 20:55:13 +00:00
|
|
|
@click.command(
    short_help="Run all configured Open edX resources",
    help=(
        "Run all configured Open edX resources. You may limit this command to "
        "some resources by passing name arguments."
    ),
)
@click.argument("names", metavar="name", nargs=-1)
@click.pass_obj
def start(context: Context, names: List[str]) -> None:
    """Apply the rendered Kustomize resources to the cluster."""
    config = tutor_config.load(context.root)

    def kustomize_apply(*extra_args: str) -> None:
        # Every resource is applied from the same kustomization root.
        utils.kubectl(
            "apply", "--kustomize", tutor_env.pathjoin(context.root), *extra_args
        )

    # Create namespace, if necessary
    # Note that this step should not be run for some users, in particular those
    # who do not have permission to edit the namespace.
    try:
        utils.kubectl("get", "namespaces", k8s_namespace(config))
        fmt.echo_info("Namespace already exists: skipping creation.")
    except exceptions.TutorError:
        fmt.echo_info("Namespace does not exist: now creating it...")
        kustomize_apply(
            "--wait", "--selector", "app.kubernetes.io/component=namespace"
        )

    for name in names or ["all"]:
        if name == "all":
            # Create volumes
            kustomize_apply(
                "--wait", "--selector", "app.kubernetes.io/component=volume"
            )
            # Create everything else except jobs
            kustomize_apply(
                "--selector",
                "app.kubernetes.io/component notin (job,volume,namespace)",
            )
        else:
            kustomize_apply("--selector", "app.kubernetes.io/name={}".format(name))
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2021-11-29 20:55:13 +00:00
|
|
|
@click.command(
    short_help="Stop a running platform",
    help=(
        "Stop a running platform by deleting all resources, except for volumes. "
        "You may limit this command to some resources by passing name arguments."
    ),
)
@click.argument("names", metavar="name", nargs=-1)
@click.pass_obj
def stop(context: Context, names: List[str]) -> None:
    """Delete the platform's workloads (deployments, services, configmaps, jobs)."""
    config = tutor_config.load(context.root)
    resource_types = "deployments,services,configmaps,jobs"
    # The load balancer is excluded so its public IP survives a stop/start cycle.
    not_lb_selector = "app.kubernetes.io/component!=loadbalancer"
    for name in names or ["all"]:
        # Narrow down to a single application unless "all" was requested.
        selectors = [not_lb_selector]
        if name != "all":
            selectors.append("app.kubernetes.io/name={}".format(name))
        utils.kubectl(
            "delete",
            *resource_selector(config, *selectors),
            resource_types,
        )
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2019-07-08 05:59:14 +00:00
|
|
|
@click.command(help="Reboot an existing platform")
@click.pass_context
def reboot(context: click.Context) -> None:
    """Stop, then immediately restart, the whole platform."""
    for subcommand in (stop, start):
        context.invoke(subcommand)
|
2019-07-08 05:59:14 +00:00
|
|
|
|
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
@click.command(help="Completely delete an existing platform")
@click.option("-y", "--yes", is_flag=True, help="Do not ask for confirmation")
@click.pass_obj
def delete(context: Context, yes: bool) -> None:
    """Remove every resource of the platform, including data volumes."""
    if not yes:
        # Destructive operation: require explicit confirmation unless -y was given.
        click.confirm(
            "Are you sure you want to delete the platform? All data will be removed.",
            abort=True,
        )
    deletion_args = [
        "delete",
        "-k",
        tutor_env.pathjoin(context.root),
        "--ignore-not-found=true",
        "--wait",
    ]
    utils.kubectl(*deletion_args)
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2019-06-05 17:28:06 +00:00
|
|
|
@click.command(help="Initialise all applications")
@click.option("-l", "--limit", help="Limit initialisation to this service or plugin")
@click.pass_obj
def init(context: Context, limit: Optional[str]) -> None:
    """Wait for the required pods, then run every initialisation job."""
    config = tutor_config.load(context.root)
    runner = K8sJobRunner(context.root, config)
    # The init jobs need these backing services to be up before they can run.
    wait_for_pod_ready(config, "caddy")
    for service in ("elasticsearch", "mysql", "mongodb"):
        if tutor_config.is_service_activated(config, service):
            wait_for_pod_ready(config, service)
    jobs.initialise(runner, limit_to=limit)
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2021-11-30 17:02:14 +00:00
|
|
|
@click.command(help="Scale the number of replicas of a given deployment")
@click.argument("deployment")
@click.argument("replicas", type=int)
@click.pass_obj
def scale(context: Context, deployment: str, replicas: int) -> None:
    """Set the replica count of a single deployment."""
    config = tutor_config.load(context.root)
    scale_args = ["scale"]
    # Note that we don't use the full resource selector because selectors
    # are not compatible with the deployment/<name> argument.
    scale_args += resource_namespace_selector(config)
    scale_args.append("--replicas={}".format(replicas))
    scale_args.append("deployment/{}".format(deployment))
    utils.kubectl(*scale_args)
|
|
|
|
|
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
@click.command(help="Create an Open edX user and interactively set their password")
@click.option("--superuser", is_flag=True, help="Make superuser")
@click.option("--staff", is_flag=True, help="Make staff user")
@click.option(
    "-p",
    "--password",
    help="Specify password from the command line. If undefined, you will be prompted to input a password",
)
@click.argument("name")
@click.argument("email")
@click.pass_obj
def createuser(
    context: Context,
    superuser: bool,  # fixed annotation: --superuser is an is_flag option, not a str
    staff: bool,
    password: Optional[str],  # None when the user should be prompted interactively
    name: str,
    email: str,
) -> None:
    """Create an Open edX user by running the user-creation command in an lms pod."""
    config = tutor_config.load(context.root)
    command = jobs.create_user_command(superuser, staff, name, email, password=password)
    # This needs to be interactive in case the user needs to type a password
    kubectl_exec(config, "lms", command, attach=True)
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
@click.command(help="Import the demo course")
@click.pass_obj
def importdemocourse(context: Context) -> None:
    """Run the demo-course import job on the cluster."""
    fmt.echo_info("Importing demo course")
    config = tutor_config.load(context.root)
    jobs.import_demo_course(K8sJobRunner(context.root, config))
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2020-03-13 11:09:48 +00:00
|
|
|
@click.command(
    # Fixed help-text typo: "theme , use" -> "theme, use".
    help="Assign a theme to the LMS and the CMS. To reset to the default theme, use 'default' as the theme name."
)
@click.option(
    "-d",
    "--domain",
    "domains",
    multiple=True,
    help=(
        "Limit the theme to these domain names. By default, the theme is "
        "applied to the LMS and the CMS, both in development and production mode"
    ),
)
@click.argument("theme_name")
@click.pass_obj
def settheme(context: Context, domains: List[str], theme_name: str) -> None:
    """Apply `theme_name` to the given domains (all Open edX domains by default)."""
    config = tutor_config.load(context.root)
    runner = K8sJobRunner(context.root, config)
    # With no -d/--domain options, apply the theme everywhere.
    domains = domains or jobs.get_all_openedx_domains(config)
    jobs.set_theme(theme_name, domains, runner)
|
2020-03-13 11:09:48 +00:00
|
|
|
|
|
|
|
|
2019-06-06 19:58:21 +00:00
|
|
|
@click.command(name="exec", help="Execute a command in a pod of the given application")
@click.argument("service")
@click.argument("command")
@click.pass_obj
def exec_command(context: Context, service: str, command: str) -> None:
    """Run a shell command in a pod of `service`, attached to the terminal."""
    kubectl_exec(tutor_config.load(context.root), service, command, attach=True)
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2019-05-09 07:51:06 +00:00
|
|
|
@click.command(help="View output from containers")
@click.option("-c", "--container", help="Print the logs of this specific container")
@click.option("-f", "--follow", is_flag=True, help="Follow log output")
@click.option("--tail", type=int, help="Number of lines to show from each container")
@click.argument("service")
@click.pass_obj
def logs(
    context: Context,
    container: Optional[str],  # None unless -c/--container was given
    follow: bool,
    tail: Optional[int],  # fixed annotation: --tail is a type=int option, not a bool
    service: str,
) -> None:
    """Print (and optionally follow) the logs of the pods of one application."""
    config = tutor_config.load(context.root)

    # Build the kubectl argument list incrementally from the given options.
    command = ["logs"]
    selectors = ["app.kubernetes.io/name=" + service] if service else []
    command += resource_selector(config, *selectors)

    if container:
        command += ["-c", container]
    if follow:
        command += ["--follow"]
    if tail is not None:
        command += ["--tail", str(tail)]

    utils.kubectl(*command)
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2020-09-17 10:53:14 +00:00
|
|
|
@click.command(help="Wait for a pod to become ready")
@click.argument("name")
@click.pass_obj
def wait(context: Context, name: str) -> None:
    """Block until a pod of the given application is ready."""
    wait_for_pod_ready(tutor_config.load(context.root), name)
|
|
|
|
|
|
|
|
|
2019-12-24 16:22:12 +00:00
|
|
|
@click.command(help="Upgrade from a previous Open edX named release")
@click.option(
    "--from",
    "from_version",
    default="koa",
    type=click.Choice(["ironwood", "juniper", "koa", "lilac"]),
)
@click.pass_obj
def upgrade(context: Context, from_version: str) -> None:
    """Walk the release chain from `from_version` up to the current release."""
    config = tutor_config.load(context.root)

    # Ordered release chain; each step upgrades to the next release in the list.
    releases = ["ironwood", "juniper", "koa", "lilac", "maple"]
    # Releases without a handler ("lilac") require no upgrade action.
    handlers = {
        "ironwood": upgrade_from_ironwood,
        "juniper": upgrade_from_juniper,
        "koa": upgrade_from_koa,
    }
    running_version = from_version
    for current_release, next_release in zip(releases, releases[1:]):
        if running_version == current_release:
            handler = handlers.get(current_release)
            if handler is not None:
                handler(config)
            running_version = next_release
|
|
|
|
|
2020-09-17 10:53:14 +00:00
|
|
|
|
2021-04-06 10:09:00 +00:00
|
|
|
def upgrade_from_ironwood(config: Config) -> None:
    """
    Print the manual MongoDB v3.2 -> v3.6 upgrade instructions for Ironwood.

    When MongoDB is not managed by Tutor (RUN_MONGODB=false), only a reminder is
    printed, since the upgrade is the user's responsibility.
    """
    if not config["RUN_MONGODB"]:
        fmt.echo_info(
            # Fixed typo: "MongDB" -> "MongoDB".
            "You are not running MongoDB (RUN_MONGODB=false). It is your "
            "responsibility to upgrade your MongoDb instance to v3.6. There is "
            "nothing left to do to upgrade from Ironwood."
        )
        return
    message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Ironwood, you should upgrade
your MongoDb cluster from v3.2 to v3.6. You should run something similar to:

    # Upgrade from v3.2 to v3.4
    tutor k8s stop
    tutor config save --set DOCKER_IMAGE_MONGODB=mongo:3.4.24
    tutor k8s start
    tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "3.4" })'

    # Upgrade from v3.4 to v3.6
    tutor k8s stop
    tutor config save --set DOCKER_IMAGE_MONGODB=mongo:3.6.18
    tutor k8s start
    tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "3.6" })'

    tutor config save --unset DOCKER_IMAGE_MONGODB"""
    fmt.echo_info(message)
|
|
|
|
|
|
|
|
|
2021-04-06 10:09:00 +00:00
|
|
|
def upgrade_from_juniper(config: Config) -> None:
    """
    Print the manual MySQL v5.6 -> v5.7 upgrade instructions for Juniper.

    When MySQL is not managed by Tutor (RUN_MYSQL=false), only a reminder is
    printed, since the upgrade is the user's responsibility.
    """
    if not config["RUN_MYSQL"]:
        fmt.echo_info(
            "You are not running MySQL (RUN_MYSQL=false). It is your "
            "responsibility to upgrade your MySQL instance to v5.7. There is "
            "nothing left to do to upgrade from Juniper."
        )
        return

    # Instructions only: the upgrade itself cannot be automated on Kubernetes.
    message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Juniper, you should upgrade
your MySQL database from v5.6 to v5.7. You should run something similar to:

    tutor k8s start
    tutor k8s exec mysql bash -e -c "mysql_upgrade \
        -u $(tutor config printvalue MYSQL_ROOT_USERNAME) \
        --password='$(tutor config printvalue MYSQL_ROOT_PASSWORD)'
    """
    fmt.echo_info(message)
|
2019-12-24 16:22:12 +00:00
|
|
|
|
|
|
|
|
2021-04-13 20:14:43 +00:00
|
|
|
def upgrade_from_koa(config: Config) -> None:
    """
    Print the manual MongoDB v3.6 -> v4.0 upgrade instructions for Koa -> Lilac.

    When MongoDB is not managed by Tutor (RUN_MONGODB=false), only a reminder is
    printed, since the upgrade is the user's responsibility.
    """
    if not config["RUN_MONGODB"]:
        fmt.echo_info(
            # Fixed typo: "MongDB" -> "MongoDB".
            "You are not running MongoDB (RUN_MONGODB=false). It is your "
            "responsibility to upgrade your MongoDb instance to v4.0. There is "
            "nothing left to do to upgrade to Lilac from Koa."
        )
        return
    message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Koa to Lilac, you should upgrade
your MongoDb cluster from v3.6 to v4.0. You should run something similar to:

    tutor k8s stop
    tutor config save --set DOCKER_IMAGE_MONGODB=mongo:4.0.25
    tutor k8s start
    tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "4.0" })'
    tutor config save --unset DOCKER_IMAGE_MONGODB
    """
    fmt.echo_info(message)
|
|
|
|
|
|
|
|
|
2021-02-25 08:09:14 +00:00
|
|
|
def kubectl_exec(
    config: Config, service: str, command: str, attach: bool = False
) -> int:
    """
    Execute `command` (via "sh -e -c") in the first pod of `service`.

    When `attach` is True, the terminal is attached interactively (-i -t).
    Returns the kubectl exit code. Raises exceptions.TutorError when no pod
    matches the service selector.
    """
    label_selector = "app.kubernetes.io/name={}".format(service)
    pods = K8sClients.instance().core_api.list_namespaced_pod(
        namespace=k8s_namespace(config), label_selector=label_selector
    )
    if not pods.items:
        raise exceptions.TutorError(
            "Could not find an active pod for the {} service".format(service)
        )
    pod_name = pods.items[0].metadata.name

    # Run command
    exec_args = ["exec"]
    if attach:
        # Keep stdin open and allocate a TTY for interactive commands.
        exec_args += ["-i", "-t"]
    exec_args += ["--namespace", k8s_namespace(config), pod_name]
    exec_args += ["--", "sh", "-e", "-c", command]
    return utils.kubectl(*exec_args)
|
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
|
2021-04-06 10:09:00 +00:00
|
|
|
def wait_for_pod_ready(config: Config, service: str) -> None:
    """Block until a pod of `service` reaches ContainersReady (600s timeout)."""
    fmt.echo_info("Waiting for a {} pod to be ready...".format(service))
    name_selector = "app.kubernetes.io/name={}".format(service)
    utils.kubectl(
        "wait",
        *resource_selector(config, name_selector),
        "--for=condition=ContainersReady",
        "--timeout=600s",
        "pod",
    )
|
|
|
|
|
2019-04-23 07:57:55 +00:00
|
|
|
|
2021-11-30 17:02:14 +00:00
|
|
|
def resource_selector(config: Config, *selectors: str) -> List[str]:
    """
    Convenient utility to filter the resources that belong to this project.
    """
    # Always scope to this project instance, then append caller-provided selectors.
    parts = ["app.kubernetes.io/instance=openedx-" + get_typed(config, "ID", str)]
    parts.extend(selectors)
    return resource_namespace_selector(config) + ["--selector=" + ",".join(parts)]
|
|
|
|
|
|
|
|
|
|
|
|
def resource_namespace_selector(config: Config) -> List[str]:
    """
    Convenient utility to filter the resources that belong to this project namespace.
    """
    namespace = k8s_namespace(config)
    return ["--namespace", namespace]
|
|
|
|
|
|
|
|
|
2021-04-06 10:09:00 +00:00
|
|
|
def k8s_namespace(config: Config) -> str:
    """Return the Kubernetes namespace (the "K8S_NAMESPACE" setting) as a str."""
    return get_typed(config, "K8S_NAMESPACE", str)
|
|
|
|
|
|
|
|
|
2019-01-22 20:25:04 +00:00
|
|
|
# Register every subcommand on the "k8s" group.
for _subcommand in [
    quickstart,
    start,
    stop,
    reboot,
    delete,
    init,
    scale,
    createuser,
    importdemocourse,
    settheme,
    exec_command,
    logs,
    wait,
    upgrade,
]:
    k8s.add_command(_subcommand)
|