2023-11-14
15:39 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1043'] [production]
15:39 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1044'] [production]
15:39 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1043'] [production]
15:39 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1046'] [production]
15:39 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1044'] [production]
15:38 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1043'] [production]
15:38 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1044'] [production]
15:37 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1043'] [production]
15:37 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1043'] [production]
15:35 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1044'] [production]
15:35 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1044'] [production]
15:34 <jmm@cumin2002> START - Cookbook sre.puppet.migrate-host for host vrts1002.eqiad.wmnet [production]
15:33 <arnaudb@cumin1001> dbctl commit (dc=all): 'db1127 (re)pooling @ 100%: Host failed to be depooled properly', diff saved to https://phabricator.wikimedia.org/P53432 and previous config saved to /var/cache/conftool/dbconfig/20231114-153355-arnaudb.json [production]
15:33 <arnaudb@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1236', diff saved to https://phabricator.wikimedia.org/P53431 and previous config saved to /var/cache/conftool/dbconfig/20231114-153344-arnaudb.json [production]
15:32 <oblivian@deploy2002> helmfile [codfw] DONE helmfile.d/services/mobileapps: apply [production]
15:32 <oblivian@deploy2002> helmfile [codfw] START helmfile.d/services/mobileapps: apply [production]
15:29 <oblivian@deploy2002> helmfile [eqiad] DONE helmfile.d/services/mobileapps: apply [production]
15:29 <oblivian@deploy2002> helmfile [eqiad] START helmfile.d/services/mobileapps: apply [production]
15:28 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1043'] [production]
15:28 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1043'] [production]
15:26 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1044'] [production]
15:25 <jclark@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts ['cloudvirt1044'] [production]
15:23 <elukey@deploy2002> helmfile [eqiad] DONE helmfile.d/services/changeprop-jobqueue: sync [production]
15:23 <jmm@cumin2002> END (PASS) - Cookbook sre.puppet.migrate-role (exit_code=0) for role: mariadb::analytics_replica [production]
15:22 <elukey@deploy2002> helmfile [eqiad] START helmfile.d/services/changeprop-jobqueue: sync [production]
15:22 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1046'] [production]
15:21 <btullis@cumin1001> END (PASS) - Cookbook sre.presto.roll-restart-workers (exit_code=0) for Presto analytics cluster: Roll restart of all Presto's jvm daemons. [production]
15:20 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1043'] [production]
15:18 <arnaudb@cumin1001> dbctl commit (dc=all): 'db1127 (re)pooling @ 90%: Host failed to be depooled properly', diff saved to https://phabricator.wikimedia.org/P53430 and previous config saved to /var/cache/conftool/dbconfig/20231114-151850-arnaudb.json [production]
15:18 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudvirt1044'] [production]
15:17 <jclark@cumin1001> END (PASS) - Cookbook sre.hardware.upgrade-firmware (exit_code=0) upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:17 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:17 <jclark@cumin1001> END (PASS) - Cookbook sre.hardware.upgrade-firmware (exit_code=0) upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:16 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:16 <jclark@cumin1001> END (PASS) - Cookbook sre.hardware.upgrade-firmware (exit_code=0) upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:16 <jclark@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts ['cloudrabbit1003'] [production]
15:16 <arnaudb@cumin1001> dbctl commit (dc=all): 'Depooling db1236 (T348183)', diff saved to https://phabricator.wikimedia.org/P53428 and previous config saved to /var/cache/conftool/dbconfig/20231114-151602-arnaudb.json [production]
15:15 <arnaudb@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db1236.eqiad.wmnet with reason: Maintenance [production]
15:15 <arnaudb@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db1236.eqiad.wmnet with reason: Maintenance [production]
15:15 <arnaudb@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1227 (T348183)', diff saved to https://phabricator.wikimedia.org/P53427 and previous config saved to /var/cache/conftool/dbconfig/20231114-151541-arnaudb.json [production]
15:10 <jmm@cumin2002> START - Cookbook sre.puppet.migrate-role for role: mariadb::analytics_replica [production]
15:03 <arnaudb@cumin1001> dbctl commit (dc=all): 'db1127 (re)pooling @ 75%: Host failed to be depooled properly', diff saved to https://phabricator.wikimedia.org/P53426 and previous config saved to /var/cache/conftool/dbconfig/20231114-150345-arnaudb.json [production]
15:00 <arnaudb@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1227', diff saved to https://phabricator.wikimedia.org/P53425 and previous config saved to /var/cache/conftool/dbconfig/20231114-150034-arnaudb.json [production]
14:58 <jbond@cumin1001> END (PASS) - Cookbook sre.puppet.migrate-role (exit_code=0) for role: wmcs::openstack::codfw1dev::backups [production]
14:53 <kamila@deploy2002> helmfile [staging-eqiad] DONE helmfile.d/admin 'apply'. [production]
14:52 <kamila@deploy2002> helmfile [staging-eqiad] START helmfile.d/admin 'apply'. [production]
14:52 <kamila@deploy2002> helmfile [codfw] DONE helmfile.d/admin 'apply'. [production]
14:51 <kamila@deploy2002> helmfile [codfw] START helmfile.d/admin 'apply'. [production]
14:50 <btullis@cumin1001> START - Cookbook sre.presto.roll-restart-workers for Presto analytics cluster: Roll restart of all Presto's jvm daemons. [production]
14:50 <jbond@cumin1001> START - Cookbook sre.puppet.migrate-role for role: wmcs::openstack::codfw1dev::backups [production]