mirror of
https://github.com/ansible/awx.git
synced 2026-01-11 01:57:35 -03:30
Merge pull request #10204 from rooftopcellist/container-groups-registry-creds
Container groups registry creds SUMMARY Fixes #10114 In VM-based installs, the user provides image pull creds to us, then we put them in an authfile and give it to podman via --authfile. This is not so simple with ContainerGroups, because in this paradigm we need to use the k8s API to apply a pod spec to create containers. Currently, the EE pod gets created, but errors when pulling the custom EE in from the private repo. This work modifies the init() for the AWXReceptorJob class to create a k8s secret in the given namespace, then specifies that secret name in the pod spec as an imagePullSecret. Also, the imagePullPolicy was not being enforced when running JTs in EEs using container groups; this is because the imagePullPolicy never got set on the pod spec. ISSUE TYPE Bugfix Pull Request COMPONENT NAME API AWX VERSION devel ADDITIONAL INFORMATION Issues that this solves: Image pull secret now gets created in the cluster namespace specified by the user for that container group. imagePullSecret name gets set on the pod spec. If the pull secret already exists in the namespace, delete it, then create it. (kube_api.replace_namespaced_secret did not work for this case...) Enforce imagePullPolicy for EEs in container groups. Basic error handling. Reviewed-by: Alan Rominger <arominge@redhat.com> Reviewed-by: Christian Adams <rooftopcellist@gmail.com> Reviewed-by: Jeff Bradberry <None> Reviewed-by: Shane McDonald <me@shanemcd.com> Reviewed-by: Chris Meyers <None>
This commit is contained in:
commit
2b4732f07b
@ -1,10 +1,13 @@
|
||||
import collections
|
||||
import json
|
||||
import logging
|
||||
from base64 import b64encode
|
||||
from urllib import parse as urlparse
|
||||
|
||||
from django.conf import settings
|
||||
from kubernetes import client, config
|
||||
from django.utils.functional import cached_property
|
||||
from django.utils.translation import ugettext_lazy as _
|
||||
|
||||
from awx.main.utils.common import parse_yaml_or_json
|
||||
from awx.main.utils.execution_environments import get_default_pod_spec
|
||||
@ -51,6 +54,96 @@ class PodManager(object):
|
||||
|
||||
return pods
|
||||
|
||||
def create_secret(self, job):
    """Ensure an imagePullSecret for the job's EE registry credential exists
    in the container group's namespace, and return its name.

    Builds a ``kubernetes.io/dockerconfigjson`` secret from the execution
    environment's registry credential and creates it in ``self.namespace``.
    If a secret with the same name already exists but holds different auth
    data, it is deleted and re-created (``replace_namespaced_secret`` did
    not work for this case).

    :param job: a job whose ``execution_environment.credential`` is a
        container registry credential (host / username / password inputs).
    :returns: the secret name to reference from the pod spec's
        ``imagePullSecrets``.
    :raises PermissionError: the cluster credential cannot read secrets in
        the namespace (other than a plain 404).
    :raises RuntimeError: an existing, stale secret could not be replaced.
    """
    registry_cred = job.execution_environment.credential

    # Normalize the registry host down to "hostname[:port]".
    # urlparse requires '//' to be provided if scheme is not specified
    host = registry_cred.get_input('host')
    original_parsed = urlparse.urlsplit(host)
    if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:
        host = 'https://%s' % (host)
    parsed = urlparse.urlsplit(host)
    host = parsed.hostname
    if parsed.port:
        host = "{0}:{1}".format(host, parsed.port)

    username = registry_cred.get_input("username")
    password = registry_cred.get_input("password")

    # Construct the container auth dict and base64 encode it, matching the
    # .dockerconfigjson layout that k8s/podman expect.
    token = b64encode("{}:{}".format(username, password).encode('UTF-8')).decode()
    auth_dict = json.dumps({"auths": {host: {"auth": token}}}, indent=4)
    auth_data = b64encode(auth_dict.encode('UTF-8')).decode()

    # Construct the Secret object; the name is unique per install and per
    # registry credential so distinct credentials never collide.
    secret_name = "automation-{0}-image-pull-secret-{1}".format(settings.INSTALL_UUID[:5], registry_cred.id)
    secret = client.V1Secret()
    secret.metadata = client.V1ObjectMeta(name=secret_name)
    secret.type = "kubernetes.io/dockerconfigjson"
    secret.kind = "Secret"
    secret.data = {".dockerconfigjson": auth_data}

    # Check whether the secret already exists and whether it is stale.
    secret_exists = False
    replace_secret = False
    try:
        existing_secret = self.kube_api.read_namespaced_secret(namespace=self.namespace, name=secret_name)
        secret_exists = True
        if existing_secret.data != secret.data:
            replace_secret = True
    except client.rest.ApiException as e:
        # A 404 simply means we need to create the secret below; any other
        # API error while reading is fatal for the job.
        if e.status != 404:
            if e.status == 403:
                error_msg = _(
                    'Failed to create secret for container group {} because the required service account '
                    'roles are missing. Add get, list, create and delete roles for secret resources '
                    'to your cluster credential.'
                ).format(job.instance_group.name)
            else:
                error_msg = _('Invalid openshift or k8s cluster credential')
            full_error_msg = '{0}: {1}'.format(error_msg, str(e))
            logger.exception(full_error_msg)
            raise PermissionError(full_error_msg)

    if replace_secret:
        # Replace a stale secret by delete-then-create;
        # kube_api.replace_namespaced_secret did not work for this case.
        try:
            self.kube_api.delete_namespaced_secret(name=secret.metadata.name, namespace=self.namespace)
            self.kube_api.create_namespaced_secret(namespace=self.namespace, body=secret)
        except client.rest.ApiException as e:
            if e.status == 403:
                error_msg = _(
                    'Failed to replace secret for container group {} because the required service account '
                    'roles are missing. Add create and delete roles for secret resources '
                    'to your cluster credential.'
                ).format(job.instance_group.name)
            else:
                error_msg = _('Invalid openshift or k8s cluster credential')
            logger.exception('{0}: {1}'.format(error_msg, str(e)))
            # let job continue for the case where the secret was created manually
            # and the cluster cred doesn't have permission to manage secrets
        except Exception as e:
            error_msg = 'Failed to create imagePullSecret for container group {}'.format(job.instance_group.name)
            logger.exception('{0}: {1}'.format(error_msg, str(e)))
            raise RuntimeError(error_msg)
    elif not secret_exists:
        # Create the image pull secret in the namespace for the first time.
        try:
            self.kube_api.create_namespaced_secret(namespace=self.namespace, body=secret)
        except client.rest.ApiException as e:
            # NOTE: previously a non-403 status here raised UnboundLocalError,
            # because error_msg was only assigned inside the 403 branch.
            if e.status == 403:
                error_msg = _(
                    'Failed to create imagePullSecret: {}. Check that openshift or k8s credential has permission to create a secret.'
                ).format(e.status)
            else:
                error_msg = '{0}: {1}'.format(_('Invalid openshift or k8s cluster credential'), str(e))
            logger.exception(error_msg)
            # let job continue for the case where the secret was created manually
            # and the cluster cred doesn't have permission to create a secret
        except Exception:
            error_msg = 'Failed to create imagePullSecret for container group {}'.format(job.instance_group.name)
            logger.exception(error_msg)
            job.cancel(job_explanation=error_msg)

    return secret.metadata.name
|
||||
|
||||
@property
def namespace(self):
    """Target Kubernetes namespace, read from the pod definition's metadata."""
    metadata = self.pod_definition['metadata']
    return metadata['namespace']
|
||||
|
||||
@ -842,7 +842,7 @@ class BaseTask(object):
|
||||
username = cred.get_input('username')
|
||||
password = cred.get_input('password')
|
||||
token = "{}:{}".format(username, password)
|
||||
auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
|
||||
auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
|
||||
authfile.write(json.dumps(auth_data, indent=4))
|
||||
params["container_options"].append(f'--authfile={authfile.name}')
|
||||
else:
|
||||
@ -3074,6 +3074,24 @@ class AWXReceptorJob:
|
||||
pod_spec['spec']['containers'][0]['image'] = ee.image
|
||||
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
|
||||
|
||||
# Enforce EE Pull Policy
|
||||
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
|
||||
if self.task and self.task.instance.execution_environment:
|
||||
if self.task.instance.execution_environment.pull:
|
||||
pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]
|
||||
|
||||
if self.task and self.task.instance.is_container_group_task:
|
||||
# If EE credential is passed, create an imagePullSecret
|
||||
if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
|
||||
# Create pull secret in k8s cluster based on ee cred
|
||||
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
|
||||
|
||||
pm = PodManager(self.task.instance)
|
||||
secret_name = pm.create_secret(job=self.task.instance)
|
||||
|
||||
# Inject secret name into podspec
|
||||
pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]
|
||||
|
||||
if self.task:
|
||||
pod_spec['metadata'] = deepmerge(
|
||||
pod_spec.get('metadata', {}),
|
||||
|
||||
@ -47,11 +47,10 @@ def construct_rsyslog_conf_template(settings=settings):
|
||||
return tmpl
|
||||
|
||||
if protocol.startswith('http'):
|
||||
scheme = 'https'
|
||||
# urlparse requires '//' to be provided if scheme is not specified
|
||||
original_parsed = urlparse.urlsplit(host)
|
||||
if (not original_parsed.scheme and not host.startswith('//')) or original_parsed.hostname is None:
|
||||
host = '%s://%s' % (scheme, host) if scheme else '//%s' % host
|
||||
host = 'https://%s' % (host)
|
||||
parsed = urlparse.urlsplit(host)
|
||||
|
||||
host = escape_quotes(parsed.hostname)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user