2023-05-03
16:27 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2109', diff saved to https://phabricator.wikimedia.org/P47432 and previous config saved to /var/cache/conftool/dbconfig/20230503-162741-ladsgroup.json [production]
16:25 <pt1979@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['lvs2011'] [production]
16:20 <pt1979@cumin2002> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['lvs2011'] [production]
16:20 <pt1979@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['lvs2011'] [production]
16:19 <pt1979@cumin2002> END (PASS) - Cookbook sre.hardware.upgrade-firmware (exit_code=0) upgrade firmware for hosts ['lvs2011'] [production]
16:19 <pt1979@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['lvs2011'] [production]
16:19 <pt1979@cumin2002> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['lvs2011'] [production]
16:18 <pt1979@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['lvs2011'] [production]
16:15 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3316', diff saved to https://phabricator.wikimedia.org/P47431 and previous config saved to /var/cache/conftool/dbconfig/20230503-161545-ladsgroup.json [production]
16:14 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1174 (T335838)', diff saved to https://phabricator.wikimedia.org/P47430 and previous config saved to /var/cache/conftool/dbconfig/20230503-161402-ladsgroup.json [production]
16:13 <pt1979@cumin2002> END (PASS) - Cookbook sre.hosts.provision (exit_code=0) for host lvs2011.mgmt.codfw.wmnet with reboot policy FORCED [production]
16:12 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2109 (T335838)', diff saved to https://phabricator.wikimedia.org/P47429 and previous config saved to /var/cache/conftool/dbconfig/20230503-161235-ladsgroup.json [production]
16:08 <jbond@cumin2002> END (PASS) - Cookbook sre.hardware.upgrade-firmware (exit_code=0) upgrade firmware for hosts puppetmaster2001.codfw.wmnet [production]
16:08 <jbond@cumin2002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts puppetmaster2001.codfw.wmnet [production]
16:06 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling db2109 (T335838)', diff saved to https://phabricator.wikimedia.org/P47428 and previous config saved to /var/cache/conftool/dbconfig/20230503-160601-ladsgroup.json [production]
16:05 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db2109.codfw.wmnet with reason: Maintenance [production]
16:05 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db2109.codfw.wmnet with reason: Maintenance [production]
16:01 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2178 (T335838)', diff saved to https://phabricator.wikimedia.org/P47427 and previous config saved to /var/cache/conftool/dbconfig/20230503-160146-ladsgroup.json [production]
16:00 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3316 (T335838)', diff saved to https://phabricator.wikimedia.org/P47426 and previous config saved to /var/cache/conftool/dbconfig/20230503-160039-ladsgroup.json [production]
15:59 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling db1174 (T335838)', diff saved to https://phabricator.wikimedia.org/P47425 and previous config saved to /var/cache/conftool/dbconfig/20230503-155946-ladsgroup.json [production]
15:59 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db1174.eqiad.wmnet with reason: Maintenance [production]
15:59 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db1174.eqiad.wmnet with reason: Maintenance [production]
15:55 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db1171.eqiad.wmnet with reason: Maintenance [production]
15:55 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db1171.eqiad.wmnet with reason: Maintenance [production]
15:55 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312 (T335838)', diff saved to https://phabricator.wikimedia.org/P47424 and previous config saved to /var/cache/conftool/dbconfig/20230503-155506-ladsgroup.json [production]
15:52 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3315 (T335838)', diff saved to https://phabricator.wikimedia.org/P47423 and previous config saved to /var/cache/conftool/dbconfig/20230503-155221-ladsgroup.json [production]
15:48 <eevans@cumin1001> START - Cookbook sre.cassandra.roll-restart for nodes matching restbase20[13-27].codfw.wmnet: Upgrade Cassandra - T335383 - eevans@cumin1001 [production]
15:46 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2178', diff saved to https://phabricator.wikimedia.org/P47422 and previous config saved to /var/cache/conftool/dbconfig/20230503-154639-ladsgroup.json [production]
15:42 <akosiaris@deploy1002> helmfile [staging] DONE helmfile.d/services/machinetranslation: apply [production]
15:42 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host failoid1002.eqiad.wmnet [production]
15:41 <akosiaris@deploy1002> helmfile [staging] START helmfile.d/services/machinetranslation: apply [production]
15:40 <pt1979@cumin2002> START - Cookbook sre.hosts.provision for host lvs2011.mgmt.codfw.wmnet with reboot policy FORCED [production]
15:40 <akosiaris@deploy1002> helmfile [staging] DONE helmfile.d/services/machinetranslation: apply [production]
15:40 <akosiaris@deploy1002> helmfile [staging] START helmfile.d/services/machinetranslation: apply [production]
15:40 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312', diff saved to https://phabricator.wikimedia.org/P47421 and previous config saved to /var/cache/conftool/dbconfig/20230503-154000-ladsgroup.json [production]
15:38 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host failoid1002.eqiad.wmnet [production]
15:38 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host failoid2002.codfw.wmnet [production]
15:38 <pt1979@cumin2002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
15:38 <pt1979@cumin2002> END (PASS) - Cookbook sre.puppet.sync-netbox-hiera (exit_code=0) generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Add DNS entries for lvs2011 - pt1979@cumin2002" [production]
15:37 <jbond@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts puppetmaster1002.eqiad.wmnet [production]
15:37 <jbond@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts puppetmaster1002.eqiad.wmnet [production]
15:37 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3315', diff saved to https://phabricator.wikimedia.org/P47420 and previous config saved to /var/cache/conftool/dbconfig/20230503-153715-ladsgroup.json [production]
15:36 <pt1979@cumin2002> START - Cookbook sre.puppet.sync-netbox-hiera generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Add DNS entries for lvs2011 - pt1979@cumin2002" [production]
15:34 <pt1979@cumin2002> START - Cookbook sre.dns.netbox [production]
15:34 <eevans@cumin1001> END (PASS) - Cookbook sre.cassandra.roll-restart (exit_code=0) for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra - T335383 - eevans@cumin1001 [production]
15:34 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host failoid2002.codfw.wmnet [production]
15:31 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2178', diff saved to https://phabricator.wikimedia.org/P47419 and previous config saved to /var/cache/conftool/dbconfig/20230503-153133-ladsgroup.json [production]
15:29 <elukey@deploy1002> helmfile [ml-staging-codfw] 'sync' command on namespace 'ores-legacy' for release 'main' . [production]
15:24 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312', diff saved to https://phabricator.wikimedia.org/P47418 and previous config saved to /var/cache/conftool/dbconfig/20230503-152453-ladsgroup.json [production]
15:24 <eevans@cumin1001> START - Cookbook sre.cassandra.roll-restart for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra - T335383 - eevans@cumin1001 [production]