From 24aeedbc15c92cf5871683389cc8dac3d19562b2 Mon Sep 17 00:00:00 2001 From: Laszlo Szomor Date: Mon, 19 Jun 2023 08:59:02 +0200 Subject: [PATCH] lvg: add UUID reset and active state management feature (#6682) * lvg: add UUID reset, rename, active switch feature * Add changelog fragment for 6682 * Fix Sanity 2.15,devel tests * Fix issue with LVM autoactivation * Remove rename implementation Add active/inactive states Fix errors when a PV is missing Apply suggestions from code review Co-authored-by: Felix Fontein * Update plugins/modules/lvg.py Co-authored-by: Felix Fontein --------- Co-authored-by: Felix Fontein --- .../fragments/6682-lvg-clonesupport.yml | 3 + plugins/modules/lvg.py | 359 ++++++++++++++---- tests/integration/targets/lvg/tasks/main.yml | 10 + tests/integration/targets/lvg/tasks/setup.yml | 18 +- .../targets/lvg/tasks/setup_missing_pv.yml | 18 + .../targets/lvg/tasks/teardown.yml | 14 +- .../targets/lvg/tasks/teardown_missing_pv.yml | 8 + .../targets/lvg/tasks/test_active_change.yml | 153 ++++++++ .../targets/lvg/tasks/test_active_create.yml | 71 ++++ .../targets/lvg/tasks/test_uuid_reset.yml | 107 ++++++ 10 files changed, 682 insertions(+), 79 deletions(-) create mode 100644 changelogs/fragments/6682-lvg-clonesupport.yml create mode 100644 tests/integration/targets/lvg/tasks/setup_missing_pv.yml create mode 100644 tests/integration/targets/lvg/tasks/teardown_missing_pv.yml create mode 100644 tests/integration/targets/lvg/tasks/test_active_change.yml create mode 100644 tests/integration/targets/lvg/tasks/test_active_create.yml create mode 100644 tests/integration/targets/lvg/tasks/test_uuid_reset.yml diff --git a/changelogs/fragments/6682-lvg-clonesupport.yml b/changelogs/fragments/6682-lvg-clonesupport.yml new file mode 100644 index 0000000000..91e0d897ab --- /dev/null +++ b/changelogs/fragments/6682-lvg-clonesupport.yml @@ -0,0 +1,3 @@ +minor_changes: + - lvg - add ``active`` and ``inactive`` values to the ``state`` option for active state 
management feature (https://github.com/ansible-collections/community.general/pull/6682). + - lvg - add ``reset_vg_uuid``, ``reset_pv_uuid`` options for UUID reset feature (https://github.com/ansible-collections/community.general/pull/6682). diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py index 860c333218..df96c189ae 100644 --- a/plugins/modules/lvg.py +++ b/plugins/modules/lvg.py @@ -63,15 +63,33 @@ options: default: '' state: description: - - Control if the volume group exists. + - Control if the volume group exists and its state. + - The states V(active) and V(inactive) imply V(present) state. Added in 7.1.0 + - "If V(active) or V(inactive), the module manages the VG's logical volumes' current state. + The module also handles the VG's autoactivation state if supported, + unless a volume group is being created and the autoactivation option is specified in O(vg_options)." type: str - choices: [ absent, present ] + choices: [ absent, present, active, inactive ] default: present force: description: - If V(true), allows to remove volume group with logical volumes. type: bool default: false + reset_vg_uuid: + description: + - Whether the volume group's UUID is regenerated. + - This is B(not idempotent). Specifying this parameter always results in a change. + type: bool + default: false + version_added: 7.1.0 + reset_pv_uuid: + description: + - Whether the volume group's physical volumes' UUIDs are regenerated. + - This is B(not idempotent). Specifying this parameter always results in a change. 
+ type: bool + default: false + version_added: 7.1.0 seealso: - module: community.general.filesystem - module: community.general.lvol @@ -112,6 +130,30 @@ EXAMPLES = r''' vg: resizableVG pvs: /dev/sda3 pvresize: true + +- name: Deactivate a volume group + community.general.lvg: + state: inactive + vg: vg.services + +- name: Activate a volume group + community.general.lvg: + state: active + vg: vg.services + +- name: Reset a volume group UUID + community.general.lvg: + state: inactive + vg: vg.services + reset_vg_uuid: true + +- name: Reset both volume group and pv UUID + community.general.lvg: + state: inactive + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 + reset_vg_uuid: true + reset_pv_uuid: true ''' import itertools @@ -119,6 +161,8 @@ import os from ansible.module_utils.basic import AnsibleModule +VG_AUTOACTIVATION_OPT = '--setautoactivation' + def parse_vgs(data): vgs = [] @@ -156,6 +200,178 @@ def parse_pvs(module, data): return pvs +def find_vg(module, vg): + if not vg: + return None + vgs_cmd = module.get_bin_path('vgs', True) + dummy, current_vgs, dummy = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd, check_rc=True) + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + return this_vg + + +def is_autoactivation_supported(module, vg_cmd): + autoactivation_supported = False + dummy, vgchange_opts, dummy = module.run_command([vg_cmd, '--help'], check_rc=True) + + if VG_AUTOACTIVATION_OPT in vgchange_opts: + autoactivation_supported = True + + return autoactivation_supported + + +def activate_vg(module, vg, active): + changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgs_cmd = module.get_bin_path('vgs', True) + vgs_fields = ['lv_attr'] + + autoactivation_enabled = False + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgchange_cmd) + + if autoactivation_supported: + 
vgs_fields.append('autoactivation') + + vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '-o', ','.join(vgs_fields), '--separator', ';', vg] + dummy, current_vg_lv_states, dummy = module.run_command(vgs_cmd_with_opts, check_rc=True) + + lv_active_count = 0 + lv_inactive_count = 0 + + for line in current_vg_lv_states.splitlines(): + parts = line.strip().split(';') + if parts[0][4] == 'a': + lv_active_count += 1 + else: + lv_inactive_count += 1 + if autoactivation_supported: + autoactivation_enabled = autoactivation_enabled or parts[1] == 'enabled' + + activate_flag = None + if active and lv_inactive_count > 0: + activate_flag = 'y' + elif not active and lv_active_count > 0: + activate_flag = 'n' + + # Extra logic necessary because vgchange returns error when autoactivation is already set + if autoactivation_supported: + if active and not autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'y', vg], check_rc=True) + changed = True + elif not active and autoactivation_enabled: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'n', vg], check_rc=True) + changed = True + + if activate_flag is not None: + if module.check_mode: + changed = True + else: + module.run_command([vgchange_cmd, '--activate', activate_flag, vg], check_rc=True) + changed = True + + return changed + + +def append_vgcreate_options(module, state, vgoptions): + vgcreate_cmd = module.get_bin_path('vgcreate', True) + + autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgcreate_cmd) + + if autoactivation_supported and state in ['active', 'inactive']: + if VG_AUTOACTIVATION_OPT not in vgoptions: + if state == 'active': + vgoptions += [VG_AUTOACTIVATION_OPT, 'y'] + else: + vgoptions += [VG_AUTOACTIVATION_OPT, 'n'] + + +def get_pv_values_for_resize(module, device): + pvdisplay_cmd = module.get_bin_path('pvdisplay', True) + pvdisplay_ops = 
["--units", "b", "--columns", "--noheadings", "--nosuffix", "--separator", ";", "-o", "dev_size,pv_size,pe_start,vg_extent_size"] + pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops + + dummy, pv_values, dummy = module.run_command(pvdisplay_cmd_device_options, check_rc=True) + + values = pv_values.strip().split(';') + + dev_size = int(values[0]) + pv_size = int(values[1]) + pe_start = int(values[2]) + vg_extent_size = int(values[3]) + + return (dev_size, pv_size, pe_start, vg_extent_size) + + +def resize_pv(module, device): + changed = False + pvresize_cmd = module.get_bin_path('pvresize', True) + + dev_size, pv_size, pe_start, vg_extent_size = get_pv_values_for_resize(module=module, device=device) + if (dev_size - (pe_start + pv_size)) > vg_extent_size: + if module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, versions of pvresize rc indicates failure. + rc, out, err = module.run_command([pvresize_cmd, device]) + dummy, new_pv_size, dummy, dummy = get_pv_values_for_resize(module=module, device=device) + if pv_size == new_pv_size: + module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err, out=out) + else: + changed = True + + return changed + + +def reset_uuid_pv(module, device): + changed = False + pvs_cmd = module.get_bin_path('pvs', True) + pvs_cmd_with_opts = [pvs_cmd, '--noheadings', '-o', 'uuid', device] + pvchange_cmd = module.get_bin_path('pvchange', True) + pvchange_cmd_with_opts = [pvchange_cmd, '-u', device] + + dummy, orig_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + + if module.check_mode: + changed = True + else: + # If there is a missing pv on the machine, pvchange rc indicates failure. 
+ pvchange_rc, pvchange_out, pvchange_err = module.run_command(pvchange_cmd_with_opts) + dummy, new_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True) + if orig_uuid.strip() == new_uuid.strip(): + module.fail_json(msg="PV (%s) UUID change failed" % (device), rc=pvchange_rc, err=pvchange_err, out=pvchange_out) + else: + changed = True + + return changed + + +def reset_uuid_vg(module, vg): + changed = False + vgchange_cmd = module.get_bin_path('vgchange', True) + vgchange_cmd_with_opts = [vgchange_cmd, '-u', vg] + if module.check_mode: + changed = True + else: + module.run_command(vgchange_cmd_with_opts, check_rc=True) + changed = True + + return changed + + def main(): module = AnsibleModule( argument_spec=dict( @@ -165,9 +381,14 @@ def main(): pv_options=dict(type='str', default=''), pvresize=dict(type='bool', default=False), vg_options=dict(type='str', default=''), - state=dict(type='str', default='present', choices=['absent', 'present']), + state=dict(type='str', default='present', choices=['absent', 'present', 'active', 'inactive']), force=dict(type='bool', default=False), + reset_vg_uuid=dict(type='bool', default=False), + reset_pv_uuid=dict(type='bool', default=False), ), + required_if=[ + ['reset_pv_uuid', True, ['pvs']], + ], supports_check_mode=True, ) @@ -178,18 +399,25 @@ def main(): pesize = module.params['pesize'] pvoptions = module.params['pv_options'].split() vgoptions = module.params['vg_options'].split() + reset_vg_uuid = module.boolean(module.params['reset_vg_uuid']) + reset_pv_uuid = module.boolean(module.params['reset_pv_uuid']) + + this_vg = find_vg(module=module, vg=vg) + present_state = state in ['present', 'active', 'inactive'] + pvs_required = present_state and this_vg is None + changed = False dev_list = [] if module.params['pvs']: dev_list = list(module.params['pvs']) - elif state == 'present': + elif pvs_required: module.fail_json(msg="No physical volumes given.") # LVM always uses real paths not symlinks so replace 
symlinks with actual path for idx, dev in enumerate(dev_list): dev_list[idx] = os.path.realpath(dev) - if state == 'present': + if present_state: # check given devices for test_dev in dev_list: if not os.path.exists(test_dev): @@ -216,25 +444,9 @@ def main(): if used_pvs: module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name'])) - vgs_cmd = module.get_bin_path('vgs', True) - rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) - - if rc != 0: - module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) - - changed = False - - vgs = parse_vgs(current_vgs) - - for test_vg in vgs: - if test_vg['name'] == vg: - this_vg = test_vg - break - else: - this_vg = None - if this_vg is None: - if state == 'present': + if present_state: + append_vgcreate_options(module=module, state=state, vgoptions=vgoptions) # create VG if module.check_mode: changed = True @@ -268,68 +480,61 @@ def main(): module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) else: module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg)) + # activate/inactivate existing VG + elif state == 'active': + changed = activate_vg(module=module, vg=vg, active=True) + elif state == 'inactive': + changed = activate_vg(module=module, vg=vg, active=False) + + # reset VG uuid + if reset_vg_uuid: + changed = reset_uuid_vg(module=module, vg=vg) or changed # resize VG - current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] - devs_to_remove = list(set(current_devs) - set(dev_list)) - devs_to_add = list(set(dev_list) - set(current_devs)) + if dev_list: + current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) - if current_devs: - if state == 'present' and pvresize: - for 
device in current_devs: - pvresize_cmd = module.get_bin_path('pvresize', True) - pvdisplay_cmd = module.get_bin_path('pvdisplay', True) - pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"] - pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops - rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"]) - dev_size = int(dev_size.replace(" ", "")) - rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"]) - pv_size = int(pv_size.replace(" ", "")) - rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"]) - pe_start = int(pe_start.replace(" ", "")) - rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"]) - vg_extent_size = int(vg_extent_size.replace(" ", "")) - if (dev_size - (pe_start + pv_size)) > vg_extent_size: - if module.check_mode: - changed = True - else: - rc, dummy, err = module.run_command([pvresize_cmd, device]) - if rc != 0: - module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err) - else: + if current_devs: + if present_state: + for device in current_devs: + if pvresize: + changed = resize_pv(module=module, device=device) or changed + if reset_pv_uuid: + changed = reset_uuid_pv(module=module, device=device) or changed + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + # create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + if rc == 0: changed = True - - if devs_to_add or devs_to_remove: - if module.check_mode: - changed = True - else: - if devs_to_add: - devs_to_add_string = ' '.join(devs_to_add) - # create PV - pvcreate_cmd = module.get_bin_path('pvcreate', True) - for current_dev in devs_to_add: - rc, dummy, err = 
module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + # add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) if rc == 0: changed = True else: - module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) - # add PV to our VG - vgextend_cmd = module.get_bin_path('vgextend', True) - rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) + module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) - # remove some PV from our VG - if devs_to_remove: - devs_to_remove_string = ' '.join(devs_to_remove) - vgreduce_cmd = module.get_bin_path('vgreduce', True) - rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) - if rc == 0: - changed = True - else: - module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) + # remove some PV from our VG + if devs_to_remove: + devs_to_remove_string = ' '.join(devs_to_remove) + vgreduce_cmd = module.get_bin_path('vgreduce', True) + rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to reduce %s by %s." 
% (vg, devs_to_remove_string), rc=rc, err=err) module.exit_json(changed=changed) diff --git a/tests/integration/targets/lvg/tasks/main.yml b/tests/integration/targets/lvg/tasks/main.yml index e14c48c3fd..15af2d08c4 100644 --- a/tests/integration/targets/lvg/tasks/main.yml +++ b/tests/integration/targets/lvg/tasks/main.yml @@ -18,10 +18,20 @@ block: - import_tasks: setup.yml + - import_tasks: setup_missing_pv.yml + - import_tasks: test_indempotency.yml - import_tasks: test_grow_reduce.yml - import_tasks: test_pvresize.yml + + - import_tasks: test_active_change.yml + + - import_tasks: test_active_create.yml + + - import_tasks: test_uuid_reset.yml always: - import_tasks: teardown.yml + + - import_tasks: teardown_missing_pv.yml diff --git a/tests/integration/targets/lvg/tasks/setup.yml b/tests/integration/targets/lvg/tasks/setup.yml index 3984b9fc3a..466501438b 100644 --- a/tests/integration/targets/lvg/tasks/setup.yml +++ b/tests/integration/targets/lvg/tasks/setup.yml @@ -5,7 +5,7 @@ - name: "Create files to use as a disk devices" command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10" - with_sequence: 'count=2' + with_sequence: 'count=4' - name: "Show next free loop device" command: "losetup -f" @@ -21,7 +21,23 @@ - name: "Create loop device for file" command: "losetup -f {{ remote_tmp_dir }}/img2" +- name: "Show next free loop device" + command: "losetup -f" + register: loop_device3 + +- name: "Create loop device for file" + command: "losetup -f {{ remote_tmp_dir }}/img3" + +- name: "Show next free loop device" + command: "losetup -f" + register: loop_device4 + +- name: "Create loop device for file" + command: "losetup -f {{ remote_tmp_dir }}/img4" + - name: "Affect name on disk to work on" set_fact: loop_device1: "{{ loop_device1.stdout }}" loop_device2: "{{ loop_device2.stdout }}" + loop_device3: "{{ loop_device3.stdout }}" + loop_device4: "{{ loop_device4.stdout }}" diff --git a/tests/integration/targets/lvg/tasks/setup_missing_pv.yml 
b/tests/integration/targets/lvg/tasks/setup_missing_pv.yml new file mode 100644 index 0000000000..863ef87573 --- /dev/null +++ b/tests/integration/targets/lvg/tasks/setup_missing_pv.yml @@ -0,0 +1,18 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: "Prepare VG for missing PV" + lvg: + vg: vg_with_missing_pv + pvs: + - "{{ loop_device3 }}" + - "{{ loop_device4 }}" + +- name: Save loop_device4 pvid + shell: "pvs -ouuid --noheadings {{ loop_device4 }} | xargs -n1 | tr -d '-'" + register: loop_device4_pvid_result + +- name: Detach loop_device4 + command: "losetup -d {{ loop_device4 }}" diff --git a/tests/integration/targets/lvg/tasks/teardown.yml b/tests/integration/targets/lvg/tasks/teardown.yml index de49573214..2d147dee01 100644 --- a/tests/integration/targets/lvg/tasks/teardown.yml +++ b/tests/integration/targets/lvg/tasks/teardown.yml @@ -6,13 +6,25 @@ - name: Remove test volume group lvg: vg: testvg + force: true state: absent +- name: Remove LVM devices + loop: + - "{{ loop_device1 | default('') }}" + - "{{ loop_device2 | default('') }}" + - "{{ loop_device3 | default('') }}" + when: + - item|length > 0 + command: "lvmdevices --deldev {{ item }}" + ignore_errors: true + - name: Detach loop devices command: "losetup -d {{ item }}" loop: - "{{ loop_device1 | default('') }}" - "{{ loop_device2 | default('') }}" + - "{{ loop_device3 | default('') }}" when: - item != '' @@ -20,4 +32,4 @@ file: path: "{{ remote_tmp_dir }}/img{{ item }}" state: absent - with_sequence: 'count=2' + with_sequence: 'count=4' diff --git a/tests/integration/targets/lvg/tasks/teardown_missing_pv.yml b/tests/integration/targets/lvg/tasks/teardown_missing_pv.yml new file mode 100644 index 0000000000..4cff680036 --- /dev/null +++ b/tests/integration/targets/lvg/tasks/teardown_missing_pv.yml @@ -0,0 +1,8 @@ +--- +# Copyright (c) 
Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Remove loop_device4 LVM device + command: "lvmdevices --delpvid {{ loop_device4_pvid_result.stdout }}" + ignore_errors: true diff --git a/tests/integration/targets/lvg/tasks/test_active_change.yml b/tests/integration/targets/lvg/tasks/test_active_change.yml new file mode 100644 index 0000000000..99a2e23bd7 --- /dev/null +++ b/tests/integration/targets/lvg/tasks/test_active_change.yml @@ -0,0 +1,153 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create volume group on disk device + lvg: + vg: testvg + pvs: "{{ loop_device1 }}" + +- name: Create logical volumes on volume group + loop: + - lv1 + - lv2 + lvol: + vg: testvg + lv: "{{ item }}" + size: 2m + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: initial_lv_status_result + +- name: Assert all lv in testvg are active + loop: "{{ initial_lv_status_result.stdout_lines }}" + assert: + that: + - "'active' == '{{ item }}'" + +- name: Deactivate volume group + lvg: + state: inactive + vg: testvg + register: vg_deactivate_result + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: deactivated_lv_status_result + +- name: Do all assertions to verify expected results + assert: + that: + - vg_deactivate_result is changed + - "'active' not in deactivated_lv_status_result.stdout" + +- name: Deactivate volume group again to verify idempotence + lvg: + state: inactive + vg: testvg + register: repeated_vg_deactivate_result + +- name: Verify vg deactivation idempontency + assert: + that: + - repeated_vg_deactivate_result is not changed + +- name: 
Activate volume group in check mode + lvg: + state: active + vg: testvg + register: check_mode_vg_activate_result + check_mode: true + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: check_mode_activate_lv_status_result + +- name: Verify VG activation in check mode changed without activating LVs + assert: + that: + - check_mode_vg_activate_result is changed + - "'active' not in check_mode_activate_lv_status_result.stdout" + +- name: Activate volume group + lvg: + state: active + vg: testvg + register: vg_activate_result + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: activate_lv_status_result + +- name: Verify vg activation + assert: + that: + - vg_activate_result is changed + +- name: Assert all lv in testvg are active + loop: "{{ activate_lv_status_result.stdout_lines }}" + assert: + that: + - "'active' == '{{ item }}'" + +- name: Activate volume group again to verify idempontency + lvg: + state: active + vg: testvg + register: repeated_vg_activate_result + +- name: Verify vg activation idempontency + assert: + that: + - repeated_vg_activate_result is not changed + +- name: Deactivate lv2 in testvg + lvol: + vg: testvg + lv: lv2 + active: false + +- name: Activate volume group again to verify partially activated vg activation + lvg: + state: active + vg: testvg + register: partial_vg_activate_result + +- name: Verify partially activated vg activation + assert: + that: + - partial_vg_activate_result is changed + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: activate_partial_lv_status_result + +- name: Assert all lv in testvg are active + loop: "{{ activate_partial_lv_status_result.stdout_lines }}" + assert: + that: + - "'active' == '{{ item }}'" + +- name: Deactive volume group in check mode + lvg: + state: inactive + vg: testvg + register: 
check_mode_vg_deactivate_result + check_mode: true + +- name: Collect all lv active status in testvg + shell: vgs -olv_active --noheadings testvg | xargs -n1 + register: check_mode_deactivate_lv_status_result + +- name: Verify check mode vg deactivation changed + assert: + that: + - check_mode_vg_deactivate_result is changed + +- name: Assert all lv in testvg are still active + loop: "{{ check_mode_deactivate_lv_status_result.stdout_lines }}" + assert: + that: + - "'active' == '{{ item }}'" diff --git a/tests/integration/targets/lvg/tasks/test_active_create.yml b/tests/integration/targets/lvg/tasks/test_active_create.yml new file mode 100644 index 0000000000..7ac1ffedd7 --- /dev/null +++ b/tests/integration/targets/lvg/tasks/test_active_create.yml @@ -0,0 +1,71 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Collect vgcreate help + command: "vgcreate --help" + register: vgcreate_help_result + +- when: "'--setautoactivation' in vgcreate_help_result.stdout" + block: + - name: Create autoactivated volume group on disk device + lvg: + state: active + vg: vg_autoact_test + pvs: "{{ loop_device2 }}" + + - name: Collect vg autoactivation status for vg_autoact_test + shell: vgs -oautoactivation --noheadings vg_autoact_test | xargs -n1 + register: active_vg_autoact_status_result + + - name: Assert vg autoactivation is set for vg_autoact_test + assert: + that: "'enabled' == active_vg_autoact_status_result.stdout" + + - name: Remove vg_autoact_test for the next test + lvg: + state: absent + vg: vg_autoact_test + force: true + + - name: Create auttoactivation disabled volume group on disk device + lvg: + state: inactive + vg: vg_autoact_test + pvs: "{{ loop_device2 }}" + + - name: Collect vg autoactivation status for vg_autoact_test + shell: vgs -oautoactivation --noheadings vg_autoact_test | xargs -n1 + register: 
inactive_vg_autoact_status_result + + - name: Assert vg autoactivation disabled for vg_autoact_test + assert: + that: "inactive_vg_autoact_status_result.stdout | length == 0" + + - name: Remove vg_autoact_test for the next test + lvg: + state: absent + vg: vg_autoact_test + force: true + + - name: Create autoactivation disabled by option volume group on disk device + lvg: + state: active + vg: vg_autoact_test + vg_options: "--setautoactivation n" + pvs: "{{ loop_device2 }}" + + - name: Collect vg autoactivation status for vg_autoact_test + shell: vgs -oautoactivation --noheadings vg_autoact_test | xargs -n1 + register: inactive_by_option_vg_autoact_status_result + + - name: Assert vg autoactivation disabled by option for vg_autoact_test + assert: + that: "inactive_by_option_vg_autoact_status_result.stdout | length == 0" + always: + - name: Cleanup vg_autoact_test + lvg: + state: absent + vg: vg_autoact_test + force: true diff --git a/tests/integration/targets/lvg/tasks/test_uuid_reset.yml b/tests/integration/targets/lvg/tasks/test_uuid_reset.yml new file mode 100644 index 0000000000..8de50ace5c --- /dev/null +++ b/tests/integration/targets/lvg/tasks/test_uuid_reset.yml @@ -0,0 +1,107 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +- name: Create volume group on disk device + lvg: + vg: testvg + pvs: "{{ loop_device1 }}" + +- name: Save testvg uuid + shell: vgs -ouuid --noheadings testvg | xargs -n1 + register: orig_vg_uuid_cmd_result + +- name: Save pv uuid + shell: "pvs -ouuid --noheadings {{ loop_device1 }} | xargs -n1" + register: orig_pv_uuid_cmd_result + +- name: Deactivate and reset vg/pv uuid + lvg: + state: inactive + vg: testvg + pvs: "{{ loop_device1 }}" + reset_vg_uuid: true + reset_pv_uuid: true + register: vg_uuid_reset + +- name: Save testvg uuid + shell: vgs -ouuid --noheadings testvg | xargs
-n1 + register: new_vg_uuid_cmd_result + +- name: Save pv uuid + shell: "pvs -ouuid --noheadings {{ loop_device1 }} | xargs -n1" + register: new_pv_uuid_cmd_result + +- name: Do all assertions to verify expected results + assert: + that: + - vg_uuid_reset is changed + - orig_vg_uuid_cmd_result.stdout != new_vg_uuid_cmd_result.stdout + - orig_pv_uuid_cmd_result.stdout != new_pv_uuid_cmd_result.stdout + +- name: Reset vg uuid again to verify non-idempotence + lvg: + vg: testvg + reset_vg_uuid: true + register: repeat_vg_uuid_reset + +- name: Reset pv uuid again to verify non-idempotence + lvg: + vg: testvg + reset_pv_uuid: true + pvs: "{{ loop_device1 }}" + register: repeat_pv_uuid_reset + +- name: Save testvg uuid + shell: vgs -ouuid --noheadings testvg | xargs -n1 + register: repeat_vg_uuid_cmd_result + +- name: Save pv uuid + shell: "pvs -ouuid --noheadings {{ loop_device1 }} | xargs -n1" + register: repeat_pv_uuid_cmd_result + +- name: Do all assertions to verify expected results + assert: + that: + - repeat_vg_uuid_reset is changed + - repeat_pv_uuid_reset is changed + - new_vg_uuid_cmd_result.stdout != repeat_vg_uuid_cmd_result.stdout + - new_pv_uuid_cmd_result.stdout != repeat_pv_uuid_cmd_result.stdout + +- name: Reset vg uuid in check mode + lvg: + vg: testvg + reset_vg_uuid: true + register: check_mode_vg_uuid_reset + check_mode: true + +- name: Reset pv uuid in check mode + lvg: + vg: testvg + reset_pv_uuid: true + pvs: "{{ loop_device1 }}" + register: check_mode_pv_uuid_reset + check_mode: true + +- name: Save testvg uuid + shell: vgs -ouuid --noheadings testvg | xargs -n1 + register: check_mode_vg_uuid_cmd_result + +- name: Save pv uuid + shell: "pvs -ouuid --noheadings {{ loop_device1 }} | xargs -n1" + register: check_mode_pv_uuid_cmd_result + +- name: Do all assertions to verify expected results + assert: + that: + - check_mode_vg_uuid_reset is changed + - check_mode_pv_uuid_reset is changed + - check_mode_vg_uuid_cmd_result.stdout == 
repeat_vg_uuid_cmd_result.stdout + - check_mode_pv_uuid_cmd_result.stdout == repeat_pv_uuid_cmd_result.stdout + +- name: Activate volume group + lvg: + state: active + vg: testvg + register: vg_activate