# file: ceph-make-vms.yml
# (forked from johnsimcall/storage-install)
#
# prerequisites:
# $ sudo yum --enablerepo=rhel-7-server-rhv-4.1-rpms install python-ovirt-engine-sdk4
#
# vm definitions / assumptions:
# All VMs will have two NICs
#
# execution:
# $ ansible-playbook ceph-make-vms.yml --ask-vault-pass
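#
# The variable names below are the ones this playbook consumes; the split between the
# two vars_files is an assumption shown for illustration only -- check your own
# rhvm-vault.yml and ceph-vars.yml for the real values:
#
#   rhvm-vault.yml (ansible-vault encrypted credentials):
#     rhvurl: "https://rhvm.example.com/ovirt-engine/api"   # hypothetical URL
#     rhvuser: "admin@internal"                             # hypothetical user
#     rhvpass: "..."
#     vm_root_password: "..."
#
#   ceph-vars.yml (cluster layout):
#     rhvCluster, templateName, instanceType, storageDomain, dnsServer, disks_size,
#     plus the "vms" and "disks" structures sketched next to the tasks that use them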
---
- hosts: localhost
  gather_facts: yes
  vars_files:
    - "{{ ansible_domain }}/rhvm-vault.yml"
    - "{{ ansible_domain }}/ceph-vars.yml"
  tasks:
    - name: Login to RHV
      ovirt_auth:
        url: "{{ rhvurl }}"
        insecure: yes
        username: "{{ rhvuser }}"
        password: "{{ rhvpass }}"

    - name: Create VMs
      ovirt_vms:
        auth: "{{ ovirt_auth }}"
        cluster: "{{ rhvCluster }}"
        template: "{{ templateName }}"
        name: "{{ item.key }}"
        state: present
        instance_type: "{{ instanceType }}"
        nics:
          - name: nic1
            profile_name: ovirtmgmt
          - name: nic2
            profile_name: ovirtmgmt
        cloud_init:
          host_name: "{{ item.key }}.{{ ansible_domain }}"
          user_name: root
          root_password: "{{ vm_root_password }}"
          authorized_ssh_keys: |
            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1vfRWODjNpS45BwAD864B1ezqIOb5fxCjnOwsgx/p8QchwWQhIUeC+PAUBI/QJ/nzX2pUOx0erNu8wuQRobOMxmYmsIyeuIIkBSJxffeFm4Cuy0glgkNR5ry90AHabO7bvXoY1q2QJ6sdkMeh2r1b/tx+2KFBOpe6v2HAcBpS+srl/fpdOV0GK2HNC0DsHz/2/me2hQ9gNIBOrb/Y7TJL5GcmbbHeteW4g648w1771El6r+JugmwlJ7/B/Jw5b4StpJmYQq0s1x9TJnqXO1kZGZ1YDrUEF25ZxkLZxmLxwD8hUDVbaeCAjbNQCe5OXc3wl96VvwXsPE2d838DBUTL [email protected]
            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDP1SXmgAxJDfsZ15OfjiE1nzZa2Nh/B5J67KqXoJRlWUVLxaUw52tlbISSYdmpXmVb84huiIz+W5XcOrtK8lFB8O2seW50nevBRFuMnwQ8kgsHnvQ9TJjzCCngLlQjFrxH3g4oNUvSvv5hgd0j8dGA3NSJN7O0VMMQgWQK+Bt1YYxkwxsQ8NPnSixNevLj/bQvv3WRoekfu1c0LvL5IdY3yhpQ1no84jCXR4CD2ZA7msxzvfxgJ1/IFD0m8EDKRKmbg4RtgfTYB+fwvrTw1l/DoSlUtEdvrBx8T+1s0FCz6qjNfpvCmubAUR/2gWIu49s7q8sOmjRGBw9DL1hVdHUN mflanner@t460s
            - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC47Ph1jxl8GtvUuQRigjgOoVFOkWUQCKtC4486KpEGJurAzyoMNJ0x9KvvMHOOkd+fli1tyk60DQUtBiBCrXv+fz7Q9myEeI1sEkZ7iMOkwEQqFj7eSmlPwnm07+lHlBs8OnUBd1Tw9KHzux2KnzRNRmq1z9vLUJshe0jbsHoUn+X8CyBurS0J1SwL4hopyDTjMvYhoWc9sT9MyEw1IZ962xCeZ/ey3/0uWIeCgKZA9lQWSGh62ZTux9Oc/8Sa0KO1Faog0Qko+4VamjkOr6AghmJJXfJlF4HAZCmwhrkSWLuBl7pcP2Lc6B5wNh2XVZdLngaQNCeasdPXqCXxkAhX [email protected]
          # TODO: the dns_servers/dns_search settings below don't take effect,
          # so the custom_script runcmd below is used as a workaround
          dns_servers: "{{ dnsServer }}"
          dns_search: "{{ ansible_domain }}"
          custom_script: |
            runcmd:
              - hostnamectl set-hostname {{ item.key }}.{{ ansible_domain }}
              - nmcli con modify "System eth0" connection.id eth0 ipv4.dns {{ dnsServer }} ipv4.dns-search {{ ansible_domain }}
              - nmcli con modify "System eth1" connection.id eth1
              - nmcli net off
              - nmcli net on
              - yum -y remove cloud-init
        cloud_init_nics: # Requires "nmcli net off/on" to apply
          - nic_name: eth0
            nic_on_boot: true
            nic_boot_protocol: static
            nic_ip_address: "{{ item.value.nic1.ipaddress }}"
            nic_netmask: "{{ item.value.nic1.netmask }}"
            nic_gateway: "{{ item.value.nic1.gateway }}"
          - nic_name: eth1
            nic_on_boot: true
            nic_boot_protocol: static
            nic_ip_address: "{{ item.value.nic2.ipaddress }}"
            nic_netmask: "{{ item.value.nic2.netmask }}"
            nic_gateway: "{{ item.value.nic2.gateway }}"
        wait: true
      with_dict: "{{ vms }}" # vms is defined in ceph-vars.yml (loaded above)
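      # A minimal sketch (assumed, not taken from the repo) of the "vms" dict consumed
      # above: item.key becomes the VM/host name and item.value.nic1/nic2 supply the
      # static addressing for cloud_init_nics. Names and addresses are hypothetical.
      #
      #   vms:
      #     ceph-mon1:
      #       nic1: { ipaddress: 192.0.2.11, netmask: 255.255.255.0, gateway: 192.0.2.1 }
      #       nic2: { ipaddress: 198.51.100.11, netmask: 255.255.255.0, gateway: 198.51.100.1 }
      #     ceph-osd1:
      #       nic1: { ipaddress: 192.0.2.21, netmask: 255.255.255.0, gateway: 192.0.2.1 }
      #       nic2: { ipaddress: 198.51.100.21, netmask: 255.255.255.0, gateway: 198.51.100.1 }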

    - name: Create and attach disks
      ovirt_disks:
        auth: "{{ ovirt_auth }}"
        vm_name: "{{ item.0.name }}"
        storage_domain: "{{ storageDomain }}"
        name: "{{ item.0.name }}-disk-{{ item.1 }}"
        size: "{{ disks_size }}"
        format: cow
        interface: virtio_scsi
        wait: true
      with_subelements:
        - "{{ disks }}"
        - id
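      # A minimal sketch (assumed) of the "disks" structure consumed by with_subelements:
      # item.0.name is the VM name and item.1 walks the "id" sub-list, so the hypothetical
      # entry below would create ceph-osd1-disk-0 ... ceph-osd1-disk-2, each of size disks_size.
      #
      #   disks:
      #     - name: ceph-osd1
      #       id: [0, 1, 2]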

    - name: Cleanup RHV auth token
      ovirt_auth:
        ovirt_auth: "{{ ovirt_auth }}"
        state: absent