2024-05-29
ยง
|
15:18 <cdanis@deploy1002> helmfile [codfw] START helmfile.d/services/mw-api-int: sync [production]
15:18 <cdanis@deploy1002> helmfile [codfw] START helmfile.d/services/mw-wikifunctions: sync [production]
15:18 <cdanis@deploy1002> helmfile [codfw] START helmfile.d/services/mw-debug: sync [production]
15:17 <robh@cumin2002> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1041'] [production]
15:17 <robh@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1041'] [production]
15:14 <arnaudb@cumin1002> dbctl commit (dc=all): 'db1163 (re)pooling @ 10%: post reimage repool', diff saved to https://phabricator.wikimedia.org/P63560 and previous config saved to /var/cache/conftool/dbconfig/20240529-151455-arnaudb.json [production]
15:14 <marostegui@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db1181 (T366123)', diff saved to https://phabricator.wikimedia.org/P63559 and previous config saved to /var/cache/conftool/dbconfig/20240529-151430-marostegui.json [production]
15:12 <marostegui@cumin1002> dbctl commit (dc=all): 'Depooling db1181 (T366123)', diff saved to https://phabricator.wikimedia.org/P63558 and previous config saved to /var/cache/conftool/dbconfig/20240529-151219-marostegui.json [production]
15:12 <marostegui@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 8:00:00 on db1181.eqiad.wmnet with reason: Maintenance [production]
15:11 <arnaudb@cumin1002> dbctl commit (dc=all): 'db1169 (re)pooling @ 100%: post reimage repool', diff saved to https://phabricator.wikimedia.org/P63557 and previous config saved to /var/cache/conftool/dbconfig/20240529-151152-arnaudb.json [production]
15:11 <marostegui@cumin1002> START - Cookbook sre.hosts.downtime for 8:00:00 on db1181.eqiad.wmnet with reason: Maintenance [production]
15:11 <marostegui@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db1174 (T366123)', diff saved to https://phabricator.wikimedia.org/P63556 and previous config saved to /var/cache/conftool/dbconfig/20240529-151145-marostegui.json [production]
15:09 <jclark@cumin1002> START - Cookbook sre.hosts.reimage for host kafka-main1009.eqiad.wmnet with OS bullseye [production]
15:08 <arnaudb@cumin1002> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host db1163.eqiad.wmnet with OS bookworm [production]
15:08 <jclark@cumin1002> END (FAIL) - Cookbook sre.hosts.provision (exit_code=99) for host kafka-main1009.mgmt.eqiad.wmnet with reboot policy FORCED [production]
15:07 <marostegui@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db2150 (T364299)', diff saved to https://phabricator.wikimedia.org/P63555 and previous config saved to /var/cache/conftool/dbconfig/20240529-150757-marostegui.json [production]
15:07 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1009.mgmt.eqiad.wmnet with reboot policy FORCED [production]
15:07 <jiji@cumin2002> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host mc2046.codfw.wmnet with OS bookworm [production]
15:07 <jclark@cumin1002> END (FAIL) - Cookbook sre.hosts.provision (exit_code=99) for host kafka-main1009.mgmt.eqiad.wmnet with reboot policy FORCED [production]
15:06 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1009.mgmt.eqiad.wmnet with reboot policy FORCED [production]
15:06 <robh@cumin2002> END (ERROR) - Cookbook sre.hardware.upgrade-firmware (exit_code=97) upgrade firmware for hosts ['cloudvirt1041'] [production]
15:05 <robh@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1041'] [production]
15:05 <andrew@cumin1002> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1041'] [production]
15:05 <robh@cumin2002> END (ERROR) - Cookbook sre.hardware.upgrade-firmware (exit_code=97) upgrade firmware for hosts ['cloudvirt1041'] [production]
15:04 <robh@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1041'] [production]
14:58 <jiji@cumin1002> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host mc1046.eqiad.wmnet with OS bookworm [production]
14:56 <arnaudb@cumin1002> dbctl commit (dc=all): 'db1169 (re)pooling @ 75%: post reimage repool', diff saved to https://phabricator.wikimedia.org/P63554 and previous config saved to /var/cache/conftool/dbconfig/20240529-145646-arnaudb.json [production]
14:56 <marostegui@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db1174', diff saved to https://phabricator.wikimedia.org/P63553 and previous config saved to /var/cache/conftool/dbconfig/20240529-145637-marostegui.json [production]
14:56 <cdanis@deploy1002> helmfile [codfw] DONE helmfile.d/services/mw-api-int: sync [production]
14:54 <cdanis@deploy1002> helmfile [codfw] START helmfile.d/services/mw-api-int: sync [production]
14:54 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:53 <andrew@cumin1002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1041'] [production]
14:52 <klausman@deploy1002> helmfile [ml-serve-eqiad] DONE helmfile.d/admin 'apply'. [production]
14:52 <klausman@deploy1002> helmfile [ml-serve-eqiad] START helmfile.d/admin 'apply'. [production]
14:50 <klausman@deploy1002> helmfile [ml-serve-codfw] DONE helmfile.d/admin 'apply'. [production]
14:50 <jclark@cumin1002> END (FAIL) - Cookbook sre.hosts.provision (exit_code=99) for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:49 <klausman@deploy1002> helmfile [ml-serve-codfw] START helmfile.d/admin 'apply'. [production]
14:49 <jiji@cumin2002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on mc2046.codfw.wmnet with reason: host reimage [production]
14:47 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:47 <arnaudb@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on db1163.eqiad.wmnet with reason: host reimage [production]
14:47 <jclark@cumin1002> END (FAIL) - Cookbook sre.hosts.provision (exit_code=99) for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:45 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:45 <jclark@cumin1002> END (FAIL) - Cookbook sre.hosts.provision (exit_code=99) for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:44 <jiji@cumin2002> START - Cookbook sre.hosts.downtime for 2:00:00 on mc2046.codfw.wmnet with reason: host reimage [production]
14:44 <mvernon@cumin2002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
14:44 <mvernon@cumin2002> END (PASS) - Cookbook sre.puppet.sync-netbox-hiera (exit_code=0) generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Discovery IPs for apus service - mvernon@cumin2002" [production]
14:43 <jclark@cumin1002> START - Cookbook sre.hosts.provision for host kafka-main1010.mgmt.eqiad.wmnet with reboot policy FORCED [production]
14:43 <arnaudb@cumin1002> START - Cookbook sre.hosts.downtime for 2:00:00 on db1163.eqiad.wmnet with reason: host reimage [production]
14:43 <mvernon@cumin2002> START - Cookbook sre.puppet.sync-netbox-hiera generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Discovery IPs for apus service - mvernon@cumin2002" [production]
14:42 <marostegui@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db2139.codfw.wmnet with reason: Maintenance [production]