Mirror of https://github.com/ansible-collections/community.general.git (synced 2024-09-14 20:13:21 +02:00)
* nomad_job module
* Delete nomad_job.py
* new module nomad_job
* fix symlink
* disable test with centos6, not supported
* fix centos unsupported
* fix
* requested changes doc
* disable freebsd ci
* requested change docs + check_mode
* lint
* fix syntax
* update docs
* doc fix
Co-authored-by: Felix Fontein <felix@fontein.de>
* Update nomad_job.py
fix docs + ssl true default
* Update nomad_job.yml
disable ssl ci
* nomad_job_info
* Update nomad_job_info.py
fix token nomad job info
* Update nomad_job.py
idempotence + check_mode plan result
* Update nomad_job.py
fail if no id with json content
* Update nomad_job.yml
ci idempotence + check_mode, nomad_job and nomad_job_info
* Update nomad_job.yml
fix ci
* Update main.yml
add kill nomad ci
* Update main.yml
always kill
* fix check mode delete job
* ci with delete and check_mode
* lint
* force start in first deploy
* 12.4 nomad
* fix version nomad
* fix ci assert
* fix ci
* fix ci
* lint
* fix version job id None, import os unused
* lint job_info
* Update aliases
* docs frag + info refacto
* lint
lint
* ci
* jmespath
* fix ci
Co-authored-by: FERREIRA Christophe <christophe.ferreira@cnaf.fr>
Co-authored-by: Felix Fontein <felix@fontein.de>
(cherry picked from commit b2e075e6d3)
Co-authored-by: chris93111 <christopheferreira@ymail.com>
parent e3e3682eb3
commit 1b579dfdc2
10 changed files with 1255 additions and 0 deletions
plugins/doc_fragments/nomad.py (new file, 51 lines)
@@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
    host:
      description:
        - FQDN of Nomad server.
      required: true
      type: str
    use_ssl:
      description:
        - Use TLS/SSL connection.
      type: bool
      default: true
    timeout:
      description:
        - Timeout (in seconds) for the request to Nomad.
      type: int
      default: 5
    validate_certs:
      description:
        - Enable TLS/SSL certificate validation.
      type: bool
      default: true
    client_cert:
      description:
        - Path of certificate for TLS/SSL.
      type: path
    client_key:
      description:
        - Path of certificate's private key for TLS/SSL.
      type: path
    namespace:
      description:
        - Namespace for Nomad.
      type: str
    token:
      description:
        - ACL token for authentication.
      type: str
'''
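The fragment above only documents the connection options shared by the new modules; each module still declares them in its own argument_spec. As a minimal sketch of how those shared options look in a playbook task (the host name, certificate paths, and token variable are illustrative placeholders, not part of this commit):

- name: Deploy a job over TLS (illustrative values only)
  community.general.nomad_job:
    host: nomad.example.com                 # placeholder FQDN
    use_ssl: true
    validate_certs: true
    client_cert: /etc/nomad.d/cert.pem      # placeholder path
    client_key: /etc/nomad.d/key.pem        # placeholder path
    token: "{{ nomad_acl_token }}"          # placeholder variable
    state: present
    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"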
plugins/modules/clustering/nomad/nomad_job.py (new file, 255 lines)
@@ -0,0 +1,255 @@
#!/usr/bin/python
# coding: utf-8 -*-

# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: nomad_job
author: FERREIRA Christophe (@chris93111)
version_added: "1.3.0"
short_description: Launch a Nomad Job
description:
    - Launch a Nomad job.
    - Stop a Nomad job.
    - Force start a Nomad job.
requirements:
  - python-nomad
extends_documentation_fragment:
  - community.general.nomad
options:
    name:
      description:
        - Name of job for delete, stop and start job without source.
        - Either this or I(content) must be specified.
      type: str
    state:
      description:
        - Deploy or remove job.
      choices: ["present", "absent"]
      required: true
      type: str
    force_start:
      description:
        - Force job to start.
      type: bool
      default: false
    content:
      description:
        - Content of Nomad job.
        - Either this or I(name) must be specified.
      type: str
    content_format:
      description:
        - Type of content of Nomad job.
      choices: ["hcl", "json"]
      default: hcl
      type: str
notes:
  - C(check_mode) is supported.
seealso:
  - name: Nomad jobs documentation
    description: Complete documentation for Nomad API jobs.
    link: https://www.nomadproject.io/api-docs/jobs/
'''

EXAMPLES = '''
- name: Create job
  community.general.nomad_job:
    host: localhost
    state: present
    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
    timeout: 120

- name: Stop job
  community.general.nomad_job:
    host: localhost
    state: absent
    name: api

- name: Force job to start
  community.general.nomad_job:
    host: localhost
    state: present
    name: api
    timeout: 120
    force_start: true
'''

import json

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native

import_nomad = None
try:
    import nomad
    import_nomad = True
except ImportError:
    import_nomad = False


def run():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
            state=dict(required=True, choices=['present', 'absent']),
            use_ssl=dict(type='bool', default=True),
            timeout=dict(type='int', default=5),
            validate_certs=dict(type='bool', default=True),
            client_cert=dict(type='path', default=None),
            client_key=dict(type='path', default=None),
            namespace=dict(type='str', default=None),
            name=dict(type='str', default=None),
            content_format=dict(choices=['hcl', 'json'], default='hcl'),
            content=dict(type='str', default=None),
            force_start=dict(type='bool', default=False),
            token=dict(type='str', default=None, no_log=True)
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ["name", "content"]
        ],
        required_one_of=[
            ['name', 'content']
        ]
    )

    if not import_nomad:
        module.fail_json(msg=missing_required_lib("python-nomad"))

    certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))

    nomad_client = nomad.Nomad(
        host=module.params.get('host'),
        secure=module.params.get('use_ssl'),
        timeout=module.params.get('timeout'),
        verify=module.params.get('validate_certs'),
        cert=certificate_ssl,
        namespace=module.params.get('namespace'),
        token=module.params.get('token')
    )

    if module.params.get('state') == "present":

        if module.params.get('name') and not module.params.get('force_start'):
            module.fail_json(msg='To start a job by name, force_start is needed')

        changed = False
        if module.params.get('content'):

            if module.params.get('content_format') == 'json':

                job_json = module.params.get('content')
                try:
                    job_json = json.loads(job_json)
                except ValueError as e:
                    module.fail_json(msg=to_native(e))
                job = dict()
                job['job'] = job_json
                try:
                    job_id = job_json.get('ID')
                    if job_id is None:
                        module.fail_json(msg="Cannot retrieve job with ID None")
                    plan = nomad_client.job.plan_job(job_id, job, diff=True)
                    if not plan['Diff'].get('Type') == "None":
                        changed = True
                        if not module.check_mode:
                            result = nomad_client.jobs.register_job(job)
                        else:
                            result = plan
                    else:
                        result = plan
                except Exception as e:
                    module.fail_json(msg=to_native(e))

            if module.params.get('content_format') == 'hcl':

                try:
                    job_hcl = module.params.get('content')
                    job_json = nomad_client.jobs.parse(job_hcl)
                    job = dict()
                    job['job'] = job_json
                except nomad.api.exceptions.BadRequestNomadException as err:
                    msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
                    module.fail_json(msg=to_native(msg))
                try:
                    job_id = job_json.get('ID')
                    plan = nomad_client.job.plan_job(job_id, job, diff=True)
                    if not plan['Diff'].get('Type') == "None":
                        changed = True
                        if not module.check_mode:
                            result = nomad_client.jobs.register_job(job)
                        else:
                            result = plan
                    else:
                        result = plan
                except Exception as e:
                    module.fail_json(msg=to_native(e))

        if module.params.get('force_start'):

            try:
                job = dict()
                if module.params.get('name'):
                    job_name = module.params.get('name')
                else:
                    job_name = job_json['Name']
                job_json = nomad_client.job.get_job(job_name)
                if job_json['Status'] == 'running':
                    result = job_json
                else:
                    job_json['Status'] = 'running'
                    job_json['Stop'] = False
                    job['job'] = job_json
                    if not module.check_mode:
                        result = nomad_client.jobs.register_job(job)
                    else:
                        result = nomad_client.validate.validate_job(job)
                        if not result.status_code == 200:
                            module.fail_json(msg=to_native(result.text))
                        result = json.loads(result.text)
                    changed = True
            except Exception as e:
                module.fail_json(msg=to_native(e))

    if module.params.get('state') == "absent":

        try:
            if not module.params.get('name') is None:
                job_name = module.params.get('name')
            else:
                if module.params.get('content_format') == 'hcl':
                    job_json = nomad_client.jobs.parse(module.params.get('content'))
                    job_name = job_json['Name']
                if module.params.get('content_format') == 'json':
                    # parse the JSON content so the job name can be looked up
                    job_json = json.loads(module.params.get('content'))
                    job_name = job_json['Name']
            job = nomad_client.job.get_job(job_name)
            if job['Status'] == 'dead':
                changed = False
                result = job
            else:
                if not module.check_mode:
                    result = nomad_client.job.deregister_job(job_name)
                else:
                    result = job
                changed = True
        except Exception as e:
            module.fail_json(msg=to_native(e))

    module.exit_json(changed=changed, result=result)


def main():

    run()


if __name__ == "__main__":
    main()
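Because the module plans the job through the Nomad API and only registers it when the plan reports a diff, check mode doubles as a dry run that returns the plan. A minimal sketch of that usage (task names and the register variable are illustrative):

- name: Preview changes without applying them
  community.general.nomad_job:
    host: localhost
    state: present
    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
  check_mode: true
  register: plan_preview

- name: Show the planned diff
  ansible.builtin.debug:
    var: plan_preview.result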
plugins/modules/clustering/nomad/nomad_job_info.py (new file, 345 lines)
@@ -0,0 +1,345 @@
#!/usr/bin/python
# coding: utf-8 -*-

# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: nomad_job_info
author: FERREIRA Christophe (@chris93111)
version_added: "1.3.0"
short_description: Get Nomad Jobs info
description:
    - Get info for one Nomad job.
    - List Nomad jobs.
requirements:
  - python-nomad
extends_documentation_fragment:
  - community.general.nomad
options:
    name:
      description:
        - Name of the job to get info for.
        - If not specified, lists all jobs.
      type: str
notes:
  - C(check_mode) is supported.
seealso:
  - name: Nomad jobs documentation
    description: Complete documentation for Nomad API jobs.
    link: https://www.nomadproject.io/api-docs/jobs/
'''

EXAMPLES = '''
- name: Get info for job awx
  community.general.nomad_job_info:
    host: localhost
    name: awx
  register: result

- name: List Nomad jobs
  community.general.nomad_job_info:
    host: localhost
  register: result
'''

RETURN = '''
result:
    description: List of dictionaries containing job info.
    returned: success
    type: list
    sample: [
        {
            "Affinities": null,
            "AllAtOnce": false,
            "Constraints": null,
            "ConsulToken": "",
            "CreateIndex": 13,
            "Datacenters": [
                "dc1"
            ],
            "Dispatched": false,
            "ID": "example",
            "JobModifyIndex": 13,
            "Meta": null,
            "ModifyIndex": 13,
            "Multiregion": null,
            "Name": "example",
            "Namespace": "default",
            "NomadTokenID": "",
            "ParameterizedJob": null,
            "ParentID": "",
            "Payload": null,
            "Periodic": null,
            "Priority": 50,
            "Region": "global",
            "Spreads": null,
            "Stable": false,
            "Status": "pending",
            "StatusDescription": "",
            "Stop": false,
            "SubmitTime": 1602244370615307000,
            "TaskGroups": [
                {
                    "Affinities": null,
                    "Constraints": null,
                    "Count": 1,
                    "EphemeralDisk": {
                        "Migrate": false,
                        "SizeMB": 300,
                        "Sticky": false
                    },
                    "Meta": null,
                    "Migrate": {
                        "HealthCheck": "checks",
                        "HealthyDeadline": 300000000000,
                        "MaxParallel": 1,
                        "MinHealthyTime": 10000000000
                    },
                    "Name": "cache",
                    "Networks": null,
                    "ReschedulePolicy": {
                        "Attempts": 0,
                        "Delay": 30000000000,
                        "DelayFunction": "exponential",
                        "Interval": 0,
                        "MaxDelay": 3600000000000,
                        "Unlimited": true
                    },
                    "RestartPolicy": {
                        "Attempts": 3,
                        "Delay": 15000000000,
                        "Interval": 1800000000000,
                        "Mode": "fail"
                    },
                    "Scaling": null,
                    "Services": null,
                    "ShutdownDelay": null,
                    "Spreads": null,
                    "StopAfterClientDisconnect": null,
                    "Tasks": [
                        {
                            "Affinities": null,
                            "Artifacts": null,
                            "CSIPluginConfig": null,
                            "Config": {
                                "image": "redis:3.2",
                                "port_map": [
                                    {
                                        "db": 6379.0
                                    }
                                ]
                            },
                            "Constraints": null,
                            "DispatchPayload": null,
                            "Driver": "docker",
                            "Env": null,
                            "KillSignal": "",
                            "KillTimeout": 5000000000,
                            "Kind": "",
                            "Leader": false,
                            "Lifecycle": null,
                            "LogConfig": {
                                "MaxFileSizeMB": 10,
                                "MaxFiles": 10
                            },
                            "Meta": null,
                            "Name": "redis",
                            "Resources": {
                                "CPU": 500,
                                "Devices": null,
                                "DiskMB": 0,
                                "IOPS": 0,
                                "MemoryMB": 256,
                                "Networks": [
                                    {
                                        "CIDR": "",
                                        "DNS": null,
                                        "Device": "",
                                        "DynamicPorts": [
                                            {
                                                "HostNetwork": "default",
                                                "Label": "db",
                                                "To": 0,
                                                "Value": 0
                                            }
                                        ],
                                        "IP": "",
                                        "MBits": 10,
                                        "Mode": "",
                                        "ReservedPorts": null
                                    }
                                ]
                            },
                            "RestartPolicy": {
                                "Attempts": 3,
                                "Delay": 15000000000,
                                "Interval": 1800000000000,
                                "Mode": "fail"
                            },
                            "Services": [
                                {
                                    "AddressMode": "auto",
                                    "CanaryMeta": null,
                                    "CanaryTags": null,
                                    "Checks": [
                                        {
                                            "AddressMode": "",
                                            "Args": null,
                                            "CheckRestart": null,
                                            "Command": "",
                                            "Expose": false,
                                            "FailuresBeforeCritical": 0,
                                            "GRPCService": "",
                                            "GRPCUseTLS": false,
                                            "Header": null,
                                            "InitialStatus": "",
                                            "Interval": 10000000000,
                                            "Method": "",
                                            "Name": "alive",
                                            "Path": "",
                                            "PortLabel": "",
                                            "Protocol": "",
                                            "SuccessBeforePassing": 0,
                                            "TLSSkipVerify": false,
                                            "TaskName": "",
                                            "Timeout": 2000000000,
                                            "Type": "tcp"
                                        }
                                    ],
                                    "Connect": null,
                                    "EnableTagOverride": false,
                                    "Meta": null,
                                    "Name": "redis-cache",
                                    "PortLabel": "db",
                                    "Tags": [
                                        "global",
                                        "cache"
                                    ],
                                    "TaskName": ""
                                }
                            ],
                            "ShutdownDelay": 0,
                            "Templates": null,
                            "User": "",
                            "Vault": null,
                            "VolumeMounts": null
                        }
                    ],
                    "Update": {
                        "AutoPromote": false,
                        "AutoRevert": false,
                        "Canary": 0,
                        "HealthCheck": "checks",
                        "HealthyDeadline": 180000000000,
                        "MaxParallel": 1,
                        "MinHealthyTime": 10000000000,
                        "ProgressDeadline": 600000000000,
                        "Stagger": 30000000000
                    },
                    "Volumes": null
                }
            ],
            "Type": "service",
            "Update": {
                "AutoPromote": false,
                "AutoRevert": false,
                "Canary": 0,
                "HealthCheck": "",
                "HealthyDeadline": 0,
                "MaxParallel": 1,
                "MinHealthyTime": 0,
                "ProgressDeadline": 0,
                "Stagger": 30000000000
            },
            "VaultNamespace": "",
            "VaultToken": "",
            "Version": 0
        }
    ]

'''


import os
import json

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native

import_nomad = None
try:
    import nomad
    import_nomad = True
except ImportError:
    import_nomad = False


def run():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
            use_ssl=dict(type='bool', default=True),
            timeout=dict(type='int', default=5),
            validate_certs=dict(type='bool', default=True),
            client_cert=dict(type='path', default=None),
            client_key=dict(type='path', default=None),
            namespace=dict(type='str', default=None),
            name=dict(type='str', default=None),
            token=dict(type='str', default=None, no_log=True)
        ),
        supports_check_mode=True
    )

    if not import_nomad:
        module.fail_json(msg=missing_required_lib("python-nomad"))

    certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))

    nomad_client = nomad.Nomad(
        host=module.params.get('host'),
        secure=module.params.get('use_ssl'),
        timeout=module.params.get('timeout'),
        verify=module.params.get('validate_certs'),
        cert=certificate_ssl,
        namespace=module.params.get('namespace'),
        token=module.params.get('token')
    )

    changed = False
    nomad_jobs = list()
    try:
        job_list = nomad_client.jobs.get_jobs()
        for job in job_list:
            nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
        result = nomad_jobs
    except Exception as e:
        module.fail_json(msg=to_native(e))

    if module.params.get('name'):
        filter = list()
        try:
            for job in result:
                if job.get('ID') == module.params.get('name'):
                    filter.append(job)
            result = filter
            if not filter:
                module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
        except Exception as e:
            module.fail_json(msg=to_native(e))

    module.exit_json(changed=changed, result=result)


def main():

    run()


if __name__ == "__main__":
    main()
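The info module returns the raw job dictionaries under result, so the output can be post-processed with standard filters. A short sketch (illustrative only; the json_query filter needs the jmespath library that the integration tests below install):

- name: Collect all jobs
  community.general.nomad_job_info:
    host: localhost
  register: all_jobs

- name: Show job IDs and statuses
  ansible.builtin.debug:
    msg: "{{ all_jobs.result | community.general.json_query('[].{id: ID, status: Status}') }}"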
plugins/modules/nomad_job.py (new symbolic link)
@@ -0,0 +1 @@
clustering/nomad/nomad_job.py
plugins/modules/nomad_job_info.py (new symbolic link)
@@ -0,0 +1 @@
clustering/nomad/nomad_job_info.py
tests/integration/targets/nomad/aliases (new file, 6 lines)
@@ -0,0 +1,6 @@
shippable/posix/group2
nomad_job_info
destructive
skip/aix
skip/centos6
skip/freebsd
tests/integration/targets/nomad/files/job.hcl (new file, 396 lines)
@@ -0,0 +1,396 @@
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".

# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name, one or many task groups, which
# are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#
#     https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
  # The "region" parameter specifies the region in which to execute the job.
  # If omitted, this inherits the default region name of "global".
  # region = "global"
  #
  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = ["dc1"]

  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
  #
  type = "service"


  # The "constraint" stanza defines additional constraints for placing this job,
  # in addition to any resource or driver constraints. This stanza may be placed
  # at the "job", "group", or "task" level, and supports variable interpolation.
  #
  # For more information and examples on the "constraint" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/constraint.html
  #
  # constraint {
  #   attribute = "${attr.kernel.name}"
  #   value     = "linux"
  # }

  # The "update" stanza specifies the update strategy of task groups. The update
  # strategy is used to control things like rolling upgrades, canaries, and
  # blue/green deployments. If omitted, no update strategy is enforced. The
  # "update" stanza may be placed at the job or task group. When placed at the
  # job, it applies to all groups within the job. When placed at both the job and
  # group level, the stanzas are merged with the group's taking precedence.
  #
  # For more information and examples on the "update" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/update.html
  #
  update {
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # at a time.
    max_parallel = 1

    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"

    # The "healthy_deadline" parameter specifies the deadline in which the
    # allocation must be marked as healthy after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"

    # The "progress_deadline" parameter specifies the deadline in which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an allocation
    # as part of the deployment transitions to a healthy state. If no allocation
    # transitions to the healthy state before the progress deadline, the
    # deployment is marked as failed.
    progress_deadline = "10m"

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = false

    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
    canary = 0
  }
  # The migrate stanza specifies the group's strategy for migrating off of
  # draining nodes. If omitted, a default migration strategy is applied.
  #
  # For more information on the "migrate" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/migrate.html
  #
  migrate {
    # Specifies the number of task groups that can be migrated at the same
    # time. This number must be less than the total count for the group as
    # (count - max_parallel) will be left running during migrations.
    max_parallel = 1

    # Specifies the mechanism in which allocations health is determined. The
    # potential values are "checks" or "task_states".
    health_check = "checks"

    # Specifies the minimum time the allocation must be in the healthy state
    # before it is marked as healthy and unblocks further allocations from being
    # migrated. This is specified using a label suffix like "30s" or "15m".
    min_healthy_time = "10s"

    # Specifies the deadline in which the allocation must be marked as healthy
    # after which the allocation is automatically transitioned to unhealthy. This
    # is specified using a label suffix like "2m" or "1h".
    healthy_deadline = "5m"
  }
  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  #
  group "cache" {
    # The "count" parameter specifies the number of the task groups that should
    # be running under this group. This value must be non-negative and defaults
    # to 1.
    count = 1

    # The "restart" stanza configures a group's behavior on task failure. If
    # left unspecified, a default restart policy is used based on the job type.
    #
    # For more information and examples on the "restart" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/restart.html
    #
    restart {
      # The number of attempts to run the job within the specified interval.
      attempts = 2
      interval = "30m"

      # The "delay" parameter specifies the duration to wait before restarting
      # a task after it has failed.
      delay = "15s"

      # The "mode" parameter controls what happens when a task has restarted
      # "attempts" times within the interval. "delay" mode delays the next
      # restart until the next interval. "fail" mode does not restart the task
      # if "attempts" has been hit within the interval.
      mode = "fail"
    }

    # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
    # instead of a hard disk requirement. Clients using this stanza should
    # not specify disk requirements in the resources stanza of the task. All
    # tasks in this group will share the same ephemeral disk.
    #
    # For more information and examples on the "ephemeral_disk" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
    #
    ephemeral_disk {
      # When sticky is true and the task group is updated, the scheduler
      # will prefer to place the updated allocation on the same node and
      # will migrate the data. This is useful for tasks that store data
      # that should persist across allocation updates.
      # sticky = true
      #
      # Setting migrate to true results in the allocation directory of a
      # sticky allocation directory to be migrated.
      # migrate = true
      #
      # The "size" parameter specifies the size in MB of shared ephemeral disk
      # between tasks in the group.
      size = 300
    }

    # The "affinity" stanza enables operators to express placement preferences
    # based on node attributes or metadata.
    #
    # For more information and examples on the "affinity" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/affinity.html
    #
    # affinity {
    #   attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"


    #   value specifies the desired attribute value. In this example Nomad
    #   will prefer placement in the "us-west1" datacenter.
    #   value = "us-west1"


    #   weight can be used to indicate relative preference
    #   when the job has more than one affinity. It defaults to 50 if not set.
    #   weight = 100
    # }


    # The "spread" stanza allows operators to increase the failure tolerance of
    # their applications by specifying a node attribute that allocations
    # should be spread over.
    #
    # For more information and examples on the "spread" stanza, please
    # see the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/spread.html
    #
    # spread {
    #   attribute specifies the name of a node attribute or metadata
    #   attribute = "${node.datacenter}"


    #   targets can be used to define desired percentages of allocations
    #   for each targeted attribute value.
    #
    #   target "us-east1" {
    #     percent = 60
    #   }
    #   target "us-west1" {
    #     percent = 40
    #   }
    # }

    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    #
    task "redis" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task.
      driver = "docker"

      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        image = "redis:3.2"

        port_map {
          db = 6379
        }
      }

      # The "artifact" stanza instructs Nomad to download an artifact from a
      # remote source prior to starting the task. This provides a convenient
      # mechanism for downloading configuration files or data needed to run the
      # task. It is possible to specify the "artifact" stanza multiple times to
      # download multiple artifacts.
      #
      # For more information and examples on the "artifact" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/artifact.html
      #
      # artifact {
      #   source = "http://foo.com/artifact.tar.gz"
      #   options {
      #     checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
      #   }
      # }


      # The "logs" stanza instructs the Nomad client on how many log files and
      # the maximum size of those logs files to retain. Logging is enabled by
      # default, but the "logs" stanza allows for finer-grained control over
      # the log rotation and storage configuration.
      #
      # For more information and examples on the "logs" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/logs.html
      #
      # logs {
      #   max_files     = 10
      #   max_file_size = 15
      # }

      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
      #
      resources {
        cpu    = 500 # 500 MHz
        memory = 256 # 256MB

        network {
          mbits = 10
          port "db" {}
        }
      }
      # The "service" stanza instructs Nomad to register this task as a service
      # in the service discovery engine, which is currently Consul. This will
      # make the service addressable after Nomad has placed it on a host and
      # port.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      #
      service {
        name = "redis-cache"
        tags = ["global", "cache"]
        port = "db"

        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }

      # The "template" stanza instructs Nomad to manage a template, such as
      # a configuration file or script. This template can optionally pull data
      # from Consul or Vault to populate runtime configuration data.
      #
      # For more information and examples on the "template" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/template.html
      #
      # template {
      #   data          = "---\nkey: {{ key \"service/my-key\" }}"
      #   destination   = "local/file.yml"
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # The "template" stanza can also be used to create environment variables
      # for tasks that prefer those to config files. The task will be restarted
      # when data pulled from Consul or Vault changes.
      #
      # template {
      #   data        = "KEY={{ key \"service/my-key\" }}"
      #   destination = "local/file.env"
      #   env         = true
      # }

      # The "vault" stanza instructs the Nomad client to acquire a token from
      # a HashiCorp Vault server. The Nomad servers must be configured and
      # authorized to communicate with Vault. By default, Nomad will inject
      # The token into the job via an environment variable and make the token
      # available to the "template" stanza. The Nomad client handles the renewal
      # and revocation of the Vault token.
      #
      # For more information and examples on the "vault" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/vault.html
      #
      # vault {
      #   policies      = ["cdn", "frontend"]
      #   change_mode   = "signal"
      #   change_signal = "SIGHUP"
      # }

      # Controls the timeout between signalling a task it will be killed
      # and killing the task. If not set a default is used.
      # kill_timeout = "20s"
    }
  }
}
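This is the stock Nomad example job that the integration tests deploy. Outside of CI, the same file could be pushed with the new module roughly like this (host and relative file path are illustrative):

- name: Deploy the example job from a local HCL file
  community.general.nomad_job:
    host: localhost
    state: present
    content: "{{ lookup('ansible.builtin.file', 'files/job.hcl') }}"
    timeout: 120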
tests/integration/targets/nomad/meta/main.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
dependencies:
  - setup_pkg_mgr
  - setup_openssl
tests/integration/targets/nomad/tasks/main.yml (new file, 106 lines)
@@ -0,0 +1,106 @@
- name: Skip unsupported platforms
  meta: end_play
  when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>=')

- name: Install Nomad and test
  vars:
    nomad_version: 0.12.4
    nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip
    nomad_cmd: '{{ output_dir }}/nomad'
  block:

    - name: Register pyOpenSSL version
      command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)'''
      register: pyopenssl_version

    - name: Install requests<2.20 (CentOS/RHEL 6)
      pip:
        name: requests<2.20
      register: result
      until: result is success
      when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')

    - name: Install python-nomad
      pip:
        name: python-nomad
      register: result
      until: result is success

    - name: Install jmespath
      pip:
        name: jmespath
      register: result
      until: result is success

    - when: pyopenssl_version.stdout is version('0.15', '>=')
      block:
        - name: Generate privatekey
          community.crypto.openssl_privatekey:
            path: '{{ output_dir }}/privatekey.pem'

        - name: Generate CSR
          community.crypto.openssl_csr:
            path: '{{ output_dir }}/csr.csr'
            privatekey_path: '{{ output_dir }}/privatekey.pem'
            subject:
              commonName: localhost

        - name: Generate selfsigned certificate
          register: selfsigned_certificate
          community.crypto.openssl_certificate:
            path: '{{ output_dir }}/cert.pem'
            csr_path: '{{ output_dir }}/csr.csr'
            privatekey_path: '{{ output_dir }}/privatekey.pem'
            provider: selfsigned
            selfsigned_digest: sha256

    - name: Install unzip
      package:
        name: unzip
      register: result
      until: result is success
      when: ansible_distribution != "MacOSX"

    - assert:
        that: ansible_architecture in ['i386', 'x86_64', 'amd64']

    - set_fact:
        nomad_arch: '386'
      when: ansible_architecture == 'i386'

    - set_fact:
        nomad_arch: amd64
      when: ansible_architecture in ['x86_64', 'amd64']

    - name: Download nomad binary
      unarchive:
        src: '{{ nomad_uri }}'
        dest: '{{ output_dir }}'
        remote_src: true
      register: result
      until: result is success

    - vars:
        remote_dir: '{{ echo_output_dir.stdout }}'
      block:

        - command: echo {{ output_dir }}
          register: echo_output_dir

        - name: Run integration tests
          block:
            - name: Start nomad (dev mode enabled)
              shell: nohup {{ nomad_cmd }} agent -dev </dev/null >/dev/null 2>&1 &

            - name: Wait for nomad to be up
              wait_for:
                host: localhost
                port: 4646
                delay: 10
                timeout: 60

            - import_tasks: nomad_job.yml
          always:

            - name: Kill nomad
              shell: pkill nomad
tests/integration/targets/nomad/tasks/nomad_job.yml (new file, 90 lines)
@@ -0,0 +1,90 @@
---

- name: run check deploy nomad job
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_check_deployed
  check_mode: true

- name: run create nomad job
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
    force_start: true
  register: job_deployed

- name: get nomad job deployed
  nomad_job_info:
    host: localhost
    use_ssl: false
    name: example
  register: get_nomad_job

- name: get list of nomad jobs
  nomad_job_info:
    host: localhost
    use_ssl: false
  register: list_nomad_jobs

- name: assert job is deployed and tasks are changed
  assert:
    that:
      - job_check_deployed is changed
      - job_deployed is changed
      - get_nomad_job.result[0].ID == "example"
      - list_nomad_jobs.result | length == 1

- name: run check deploy job idempotence
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_check_deployed_idempotence
  check_mode: true

- name: run create nomad job idempotence
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_deployed_idempotence

- name: run check delete nomad job
  nomad_job:
    host: localhost
    state: absent
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_deleted_check
  check_mode: true

- name: run delete nomad job
  nomad_job:
    host: localhost
    state: absent
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_deleted

- name: get job deleted
  nomad_job_info:
    host: localhost
    use_ssl: false
    name: example
  register: get_job_delete

- name: assert idempotence
  assert:
    that:
      - job_check_deployed_idempotence is not changed
      - job_deployed_idempotence is not changed
      - job_deleted_check is changed
      - job_deleted is changed
      - get_job_delete.result[0].Stop
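The last assertion relies on deregistration setting the job's Stop flag while the job record itself stays queryable. An equivalent check on the status field (not part of this test file, shown only as a sketch; the status may take a moment to settle) would be:

- name: assert deleted job is dead
  assert:
    that:
      - get_job_delete.result[0].Status == 'dead'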