Fix bare variable references in docs
parent 947e0f264e
commit a625bfc8db

6 changed files with 13 additions and 13 deletions

@@ -108,7 +108,7 @@ From this, we'll use the add_host module to dynamically create a host group cons

     - name: Add all instance public IPs to host group
       add_host: hostname={{ item.public_ip }} groups=ec2hosts
-      with_items: ec2.instances
+      with_items: "{{ ec2.instances }}"

 With the host group now created, a second play at the bottom of the same provisioning playbook file might now have some configuration steps::

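All of the changes in this commit follow the same pattern: a bare variable name after ``with_items`` (which relied on deprecated implicit templating) is replaced with an explicit ``"{{ ... }}"`` Jinja2 expression. A minimal sketch of the corrected EC2 task in context, assuming the provisioning task was registered as ``ec2`` (the key and image values below are placeholders)::

    - name: Provision a set of instances
      ec2:
        key_name: my_key              # placeholder key pair
        instance_type: t2.micro
        image: ami-123456             # placeholder AMI
        count: 3
        wait: yes
      register: ec2

    - name: Add all instance public IPs to host group
      add_host: hostname={{ item.public_ip }} groups=ec2hosts
      with_items: "{{ ec2.instances }}"   # explicit templating, not the bare ec2.instances
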
@@ -222,7 +222,7 @@ Now to the fun part. We create a playbook to create our infrastructure we call i
         ip_address: "{{ public_ip }}"
         port: "{{ item.port }}"
         cidr: "{{ item.cidr | default('0.0.0.0/0') }}"
-      with_items: cs_firewall
+      with_items: "{{ cs_firewall }}"
       when: public_ip is defined

     - name: ensure static NATs

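The ``cs_firewall`` variable looped over in this CloudStack hunk is a list of firewall rule dictionaries; the ``default`` filter supplies a catch-all CIDR for items that omit one. A hedged sketch of what such a variable might look like (the ports are only an illustration)::

    vars:
      cs_firewall:
        - { port: 80 }                       # cidr falls back to 0.0.0.0/0
        - { port: 443 }
        - { port: 22, cidr: "10.0.0.0/8" }   # explicit cidr overrides the default
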
@@ -213,11 +213,11 @@ A playbook would looks like this:

     - name: Wait for SSH to come up
       wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=60
-      with_items: gce.instance_data
+      with_items: "{{ gce.instance_data }}"

     - name: Add host to groupname
       add_host: hostname={{ item.public_ip }} groupname=new_instances
-      with_items: gce.instance_data
+      with_items: "{{ gce.instance_data }}"

 - name: Manage new instances
   hosts: new_instances

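As the trailing context lines show, the ``new_instances`` group built with ``add_host`` is consumed by the next play in the same GCE playbook. A minimal sketch of that follow-up play, with a placeholder role standing in for the real configuration steps::

    - name: Manage new instances
      hosts: new_instances
      connection: ssh
      become: yes
      roles:
        - base_configuration   # hypothetical role applied to the freshly created instances
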
@@ -134,7 +134,7 @@ The rax module returns data about the nodes it creates, like IP addresses, hostn
         ansible_host: "{{ item.rax_accessipv4 }}"
         ansible_ssh_pass: "{{ item.rax_adminpass }}"
         groups: raxhosts
-      with_items: rax.success
+      with_items: "{{ rax.success }}"
       when: rax.action == 'create'

 With the host group now created, the next play in this playbook could now configure servers belonging to the raxhosts group.

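``rax.success`` is only populated with the newly built servers when the ``rax`` module actually created something, which is why each of these loops is also guarded by ``when: rax.action == 'create'``. A sketch of the surrounding tasks, assuming the build task was registered as ``rax`` (the flavor and image names are placeholders)::

    - name: Provision a set of Rackspace servers
      rax:
        name: web%03d
        flavor: performance1-1               # placeholder flavor
        image: ubuntu-1404-lts-trusty-tahr   # placeholder image
        count: 3
        group: raxhosts
        wait: yes
      register: rax

    - name: Add the new servers to the raxhosts group
      add_host:
        hostname: "{{ item.name }}"
        ansible_host: "{{ item.rax_accessipv4 }}"
        ansible_ssh_pass: "{{ item.rax_adminpass }}"
        groups: raxhosts
      with_items: "{{ rax.success }}"
      when: rax.action == 'create'
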
@@ -522,7 +522,7 @@ Build a complete webserver environment with servers, custom networks and load ba
         ansible_ssh_pass: "{{ item.rax_adminpass }}"
         ansible_user: root
         groups: web
-      with_items: rax.success
+      with_items: "{{ rax.success }}"
       when: rax.action == 'create'

     - name: Add servers to Load balancer

@@ -536,7 +536,7 @@ Build a complete webserver environment with servers, custom networks and load ba
         type: primary
         wait: yes
         region: IAD
-      with_items: rax.success
+      with_items: "{{ rax.success }}"
       when: rax.action == 'create'

     - name: Configure servers

@@ -608,7 +608,7 @@ Using a Control Machine
         ansible_user: root
         rax_id: "{{ item.rax_id }}"
         groups: web,new_web
-      with_items: rax.success
+      with_items: "{{ rax.success }}"
       when: rax.action == 'create'

     - name: Wait for rackconnect and managed cloud automation to complete

@@ -209,12 +209,12 @@ Here is the next part of the update play::
     - name: disable nagios alerts for this host webserver service
       nagios: action=disable_alerts host={{ inventory_hostname }} services=webserver
       delegate_to: "{{ item }}"
-      with_items: groups.monitoring
+      with_items: "{{ groups.monitoring }}"

     - name: disable the server in haproxy
       shell: echo "disable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats
       delegate_to: "{{ item }}"
-      with_items: groups.lbservers
+      with_items: "{{ groups.lbservers }}"

 The ``pre_tasks`` keyword just lets you list tasks to run before the roles are called. This will make more sense in a minute. If you look at the names of these tasks, you can see that we are disabling Nagios alerts and then removing the webserver that we are currently updating from the HAProxy load balancing pool.

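These two ``pre_tasks`` live inside a rolling-update play that walks through the web servers a batch at a time. A rough sketch of where they sit relative to ``serial``, ``roles`` and ``post_tasks`` (the batch size and role names are placeholders)::

    - hosts: webservers
      serial: 5                      # placeholder batch size
      pre_tasks:
        - name: disable nagios alerts for this host webserver service
          nagios: action=disable_alerts host={{ inventory_hostname }} services=webserver
          delegate_to: "{{ item }}"
          with_items: "{{ groups.monitoring }}"
      roles:
        - common
        - webserver                  # placeholder roles doing the actual update
      post_tasks:
        - name: re-enable nagios alerts
          nagios: action=enable_alerts host={{ inventory_hostname }} services=webserver
          delegate_to: "{{ item }}"
          with_items: "{{ groups.monitoring }}"
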
@@ -235,12 +235,12 @@ Finally, in the ``post_tasks`` section, we reverse the changes to the Nagios con
     - name: Enable the server in haproxy
       shell: echo "enable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats
       delegate_to: "{{ item }}"
-      with_items: groups.lbservers
+      with_items: "{{ groups.lbservers }}"

     - name: re-enable nagios alerts
       nagios: action=enable_alerts host={{ inventory_hostname }} services=webserver
       delegate_to: "{{ item }}"
-      with_items: groups.monitoring
+      with_items: "{{ groups.monitoring }}"

 Again, if you were using a Netscaler or F5 or Elastic Load Balancer, you would just substitute in the appropriate modules instead.

@@ -532,7 +532,7 @@ One can use a regular ``with_items`` with the ``play_hosts`` or ``groups`` varia

     # show all the hosts in the current play
     - debug: msg={{ item }}
-      with_items: play_hosts
+      with_items: "{{ play_hosts }}"

 There is also a specific lookup plugin ``inventory_hostnames`` that can be used like this::

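The ``inventory_hostnames`` lookup mentioned in the last context line iterates over hosts matching an inventory pattern rather than over a variable. A hedged sketch of the usage the docs are pointing at (the ``all:!www`` pattern is only an example)::

    # show inventory hosts matching a pattern, here everything except the www group
    - debug: msg={{ item }}
      with_inventory_hostnames: all:!www
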