Delete some old container group v1 code

Shane McDonald 2021-01-13 19:46:48 -05:00
parent 1d9f01a201
commit d37cb64aaf
2 changed files with 5 additions and 68 deletions


@@ -32,7 +32,7 @@ def set_pythonpath(venv_libdir, env):
 class IsolatedManager(object):
-    def __init__(self, event_handler, canceled_callback=None, check_callback=None, pod_manager=None):
+    def __init__(self, event_handler, canceled_callback=None, check_callback=None):
         """
         :param event_handler: a callable used to persist event data from isolated nodes
         :param canceled_callback: a callable - which returns `True` or `False`
@@ -45,28 +45,12 @@ class IsolatedManager(object):
         self.started_at = None
         self.captured_command_artifact = False
         self.instance = None
-        self.pod_manager = pod_manager

     def build_inventory(self, hosts):
-        if self.instance and self.instance.is_container_group_task:
-            inventory = {'all': {'hosts': {}}}
-            fd, path = tempfile.mkstemp(
-                prefix='.kubeconfig', dir=self.private_data_dir
-            )
-            with open(path, 'wb') as temp:
-                temp.write(yaml.dump(self.pod_manager.kube_config).encode())
-                temp.flush()
-                os.chmod(temp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
-            for host in hosts:
-                inventory['all']['hosts'][host] = {
-                    "ansible_connection": "kubectl",
-                    "ansible_kubectl_config": path,
-                }
-        else:
-            inventory = '\n'.join([
-                '{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
-                for host in hosts
-            ])
+        inventory = '\n'.join([
+            '{} ansible_ssh_user={}'.format(host, settings.AWX_ISOLATED_USERNAME)
+            for host in hosts
+        ])

         return inventory
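For reference, the two inventory shapes in play: the deleted v1 branch built a dict-style inventory that routed every host through the `kubectl` connection plugin via a generated kubeconfig, while the surviving branch emits a plain INI-style SSH inventory. A minimal runnable sketch of both, with hypothetical host names, a stand-in for `settings.AWX_ISOLATED_USERNAME`, and a placeholder kubeconfig path (the real one came from `tempfile.mkstemp` above):

```python
hosts = ['isolated-node-1', 'isolated-node-2']

# Surviving behavior: static INI-style inventory for SSH-reachable nodes.
ssh_inventory = '\n'.join(
    '{} ansible_ssh_user={}'.format(host, 'awx_isolated_user') for host in hosts
)
print(ssh_inventory)
# isolated-node-1 ansible_ssh_user=awx_isolated_user
# isolated-node-2 ansible_ssh_user=awx_isolated_user

# Deleted v1 behavior: dict inventory pointing each host at the kubectl
# connection plugin; the kubeconfig path here is a placeholder.
kubectl_inventory = {
    'all': {
        'hosts': {
            host: {
                'ansible_connection': 'kubectl',
                'ansible_kubectl_config': '/tmp/.kubeconfig-placeholder',
            }
            for host in hosts
        }
    }
}
```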


@@ -1180,12 +1180,6 @@ class BaseTask(object):
         if os.path.isdir(job_profiling_dir):
             shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))

-        if instance.is_containerized:
-            from awx.main.scheduler.kubernetes import PodManager  # prevent circular import
-            pm = PodManager(instance)
-            logger.debug(f"Deleting pod {pm.pod_name}")
-            pm.delete()
-
     def event_handler(self, event_data):
         #
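The deleted cleanup hook relied on a function-local import to dodge the circular dependency between the tasks module and `awx.main.scheduler.kubernetes`. A self-contained sketch of that deferred-import pattern; the `scheduler` module is fabricated in-memory here purely so the example runs on its own:

```python
import sys
import types

# Stand-in for a module that, in real code, would itself import this one
# at load time (hence the cycle).
scheduler = types.ModuleType('scheduler')

class PodManager:
    def __init__(self, instance):
        self.pod_name = f'automation-job-{instance}'

    def delete(self):
        print(f'Deleting pod {self.pod_name}')

scheduler.PodManager = PodManager
sys.modules['scheduler'] = scheduler

def cleanup_pod(instance_pk):
    # Importing at call time rather than module load time means both
    # modules are fully initialized before either looks inside the other,
    # which is exactly what a top-level circular import breaks on.
    from scheduler import PodManager
    PodManager(instance_pk).delete()

cleanup_pod(7)  # Deleting pod automation-job-7
```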
@@ -1325,16 +1319,6 @@ class BaseTask(object):
         Run the job/task and capture its output.
         '''
         self.instance = self.model.objects.get(pk=pk)
-        containerized = self.instance.is_containerized
-        pod_manager = None
-        if containerized:
-            # Here we are trying to launch a pod before transitioning the job into a running
-            # state. For some scenarios (like waiting for resources to become available) we do this
-            # rather than marking the job as error or failed. This is not always desirable. Cases
-            # such as invalid authentication should surface as an error.
-            pod_manager = self.deploy_container_group_pod(self.instance)
-            if not pod_manager:
-                return

         # self.instance because of the update_model pattern and when it's used in callback handlers
         self.instance = self.update_model(pk, status='running',
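The comment in the deleted block captures the design tension: deploying the pod before the job is marked running lets capacity problems re-queue the job, but it also means hard failures such as bad credentials must surface as errors rather than retry forever. A self-contained sketch of that sequencing, with hypothetical names throughout (not the AWX API):

```python
def start_job(job, deploy_pod, update_status):
    # Pre-flight: acquire the external resource first, and only flip the
    # job to 'running' once that succeeds.
    if job['containerized']:
        pod = deploy_pod(job)
        if pod is None:
            # deploy_pod already moved the job to 'pending' or 'error',
            # so this invocation has nothing left to do.
            return
        job['execution_node'] = pod['name']
    update_status(job, 'running')

job = {'containerized': True, 'status': 'waiting'}
start_job(
    job,
    deploy_pod=lambda j: {'name': 'awx-job-42'},            # happy-path stub
    update_status=lambda j, s: j.__setitem__('status', s),
)
print(job)
# {'containerized': True, 'status': 'running', 'execution_node': 'awx-job-42'}
```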
@@ -1517,37 +1501,6 @@ class BaseTask(object):
             raise AwxTaskError.TaskError(self.instance, rc)

-    def deploy_container_group_pod(self, task):
-        from awx.main.scheduler.kubernetes import PodManager  # Avoid circular import
-        pod_manager = PodManager(self.instance)
-
-        try:
-            log_name = task.log_format
-            logger.debug(f"Launching pod for {log_name}.")
-            pod_manager.deploy()
-        except (ApiException, Exception) as exc:
-            if isinstance(exc, ApiException) and exc.status == 403:
-                try:
-                    if 'exceeded quota' in json.loads(exc.body)['message']:
-                        # If the k8s cluster does not have capacity, we move the
-                        # job back into pending and wait until the next run of
-                        # the task manager. This does not exactly play well with
-                        # our current instance group precedence logic, since it
-                        # will just sit here forever if kubernetes returns this
-                        # error.
-                        logger.warn(exc.body)
-                        logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
-                        self.update_model(task.pk, status='pending')
-                        return
-                except Exception:
-                    logger.exception(f"Unable to handle response from Kubernetes API for {log_name}.")
-
-            logger.exception(f"Error when launching pod for {log_name}")
-            self.update_model(task.pk, status='error', result_traceback=traceback.format_exc())
-            return
-
-        self.update_model(task.pk, execution_node=pod_manager.pod_name)
-        return pod_manager
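The interesting part of the deleted method is the error triage: a 403 `ApiException` whose JSON body mentions "exceeded quota" re-queues the job as `pending` for the next task-manager cycle, while anything else becomes a hard `error`. A self-contained sketch of that triage using a stand-in exception class; the real `kubernetes.client.rest.ApiException` likewise carries `.status` and `.body`:

```python
import json

class FakeApiException(Exception):
    """Stand-in for kubernetes.client.rest.ApiException."""
    def __init__(self, status, body):
        self.status = status
        self.body = body

def triage_launch_failure(exc):
    # Mirrors the deleted logic: only a 403 whose JSON message mentions
    # 'exceeded quota' means "try again later"; everything else is fatal.
    if isinstance(exc, FakeApiException) and exc.status == 403:
        try:
            if 'exceeded quota' in json.loads(exc.body)['message']:
                return 'pending'
        except Exception:
            pass  # unparseable response body; fall through to 'error'
    return 'error'

quota = FakeApiException(
    403, json.dumps({'message': 'pods "job-1" is forbidden: exceeded quota'})
)
print(triage_launch_failure(quota))                           # pending
print(triage_launch_failure(FakeApiException(500, 'oops')))   # error
```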