blob: 0e26579f2276c689a31ae5465333d4bc20499ca6 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
|
---
- hosts: all
  tasks:
    # Root directory on the test host under which every collected log is
    # grouped before being pulled back to the Zuul executor.
    - name: Set the logs root
      ansible.builtin.set_fact:
        logs_root: "{{ ansible_user_dir }}/metal3-logs"

    # Derived log locations plus the lists of containers and namespaces to
    # collect from. This is a separate task so that logs_root is already
    # resolved when these values are templated.
    - name: Set log locations and containers
      ansible.builtin.set_fact:
        logs_before_pivoting: "{{ logs_root }}/before_pivoting"
        logs_after_pivoting: "{{ logs_root }}/after_pivoting"
        logs_management_cluster: "{{ logs_root }}/management_cluster"
        containers:
          - dnsmasq
          - httpd-infra
          - ironic
          - ironic-endpoint-keepalived
          - ironic-inspector
          - ironic-log-watch
          - registry
          - sushy-tools
          - vbmc
        namespaces:
          - baremetal-operator-system
          - capi-system
          - metal3
- name: Create log locations
file:
path: "{{ item }}"
state: directory
loop:
- "{{ logs_before_pivoting }}"
- "{{ logs_after_pivoting }}"
- "{{ logs_management_cluster }}"
- "{{ logs_root }}/libvirt"
- "{{ logs_root }}/system"
- name: Check if the logs before pivoting were stored
stat:
path: /tmp/docker
register: before_pivoting_result
- name: Copy logs before pivoting
copy:
src: /tmp/docker/
dest: "{{ logs_before_pivoting }}/"
remote_src: true
when: before_pivoting_result.stat.exists
- name: Set log location for containers (pivoting happened)
set_fact:
container_logs: "{{ logs_after_pivoting }}"
when: before_pivoting_result.stat.exists
- name: Set log location for containers (no pivoting)
set_fact:
container_logs: "{{ logs_before_pivoting }}"
when: not before_pivoting_result.stat.exists
- name: Fetch current container logs
shell: >
docker logs "{{ item }}" > "{{ container_logs }}/{{ item }}.log" 2>&1
become: true
ignore_errors: true
loop: "{{ containers }}"
- name: Fetch libvirt networks
shell: >
virsh net-dumpxml "{{ item }}" > "{{ logs_root }}/libvirt/net-{{ item }}.xml"
become: true
ignore_errors: true
loop:
- baremetal
- provisioning
- name: Fetch libvirt VMs
shell: |
for vm in $(virsh list --name --all); do
virsh dumpxml "$vm" > "{{ logs_root }}/libvirt/vm-$vm.xml"
done
become: true
ignore_errors: true
- name: Fetch system information
shell: "{{ item }} > {{ logs_root }}/system/{{ item | replace(' ', '-') }}.txt"
become: true
ignore_errors: true
loop:
- dmesg
- dpkg -l
- ip addr
- ip route
- iptables -L -v -n
- journalctl -b -o with-unit
- journalctl -u libvirtd
- pip freeze
- docker images
- docker ps --all
- systemctl
- name: Copy libvirt logs
copy:
src: /var/log/libvirt/qemu/
dest: "{{ logs_root }}/libvirt/"
remote_src: true
become: true
- name: Check if we have a cluster
command: kubectl cluster-info
ignore_errors: true
register: kubectl_result
- include_tasks: fetch_kube_logs.yaml
loop: "{{ namespaces }}"
loop_control:
loop_var: namespace
when: kubectl_result is succeeded
- name: Collect kubernetes resources
shell: |
kubectl get "{{ item }}" -A -o yaml > "{{ logs_management_cluster }}/{{ item }}.yaml"
loop:
- baremetalhosts
- clusters
- endpoints
- hostfirmwaresettings
- machines
- metal3ipaddresses
- metal3ippools
- metal3machines
- nodes
- pods
- preprovisioningimages
- services
ignore_errors: true
when: kubectl_result is succeeded
# FIXME(dtantsur): this is horrible, do something about it
- name: Fetch kubelet status logs from the master user metal3
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-metal3-status.log"
ignore_errors: true
register: kubelet0metal3status
- debug:
var: kubelet0metal3status.stdout_lines
- debug:
var: kubelet0metal3status.stderr_lines
- name: Fetch kubelet journal logs from the master user metal3
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-metal3-journal.log"
ignore_errors: true
register: kubelet0metal3journal
- debug:
var: kubelet0metal3journal.stdout_lines
- debug:
var: kubelet0metal3journal.stderr_lines
- name: Fetch kubelet status logs from the master user zuul
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-zuul-status.log"
ignore_errors: true
register: kubelet0zuulstatus
- debug:
var: kubelet0zuulstatus.stdout_lines
- debug:
var: kubelet0zuulstatus.stderr_lines
- name: Fetch kubelet journal logs from the master user zuul
shell: |
ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-zuul-journal.log"
ignore_errors: true
register: kubelet0zuuljournal
- debug:
var: kubelet0zuuljournal.stdout_lines
- debug:
var: kubelet0zuuljournal.stderr_lines
# # #
- name: Copy logs to the zuul location
synchronize:
src: "{{ logs_root }}/"
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/"
mode: pull
become: true
|