2023-05-03
15:40 <akosiaris@deploy1002> helmfile [staging] START helmfile.d/services/machinetranslation: apply [production]
15:40 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312', diff saved to https://phabricator.wikimedia.org/P47421 and previous config saved to /var/cache/conftool/dbconfig/20230503-154000-ladsgroup.json [production]
15:38 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host failoid1002.eqiad.wmnet [production]
15:38 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host failoid2002.codfw.wmnet [production]
15:38 <pt1979@cumin2002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
15:38 <pt1979@cumin2002> END (PASS) - Cookbook sre.puppet.sync-netbox-hiera (exit_code=0) generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Add DNS entries for lvs2011 - pt1979@cumin2002" [production]
15:37 <jbond@cumin1001> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts puppetmaster1002.eqiad.wmnet [production]
15:37 <jbond@cumin1001> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts puppetmaster1002.eqiad.wmnet [production]
15:37 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3315', diff saved to https://phabricator.wikimedia.org/P47420 and previous config saved to /var/cache/conftool/dbconfig/20230503-153715-ladsgroup.json [production]
15:36 <pt1979@cumin2002> START - Cookbook sre.puppet.sync-netbox-hiera generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: Add DNS entries for lvs2011 - pt1979@cumin2002" [production]
15:34 <pt1979@cumin2002> START - Cookbook sre.dns.netbox [production]
15:34 <eevans@cumin1001> END (PASS) - Cookbook sre.cassandra.roll-restart (exit_code=0) for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
15:34 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host failoid2002.codfw.wmnet [production]
15:31 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2178', diff saved to https://phabricator.wikimedia.org/P47419 and previous config saved to /var/cache/conftool/dbconfig/20230503-153133-ladsgroup.json [production]
15:29 <elukey@deploy1002> helmfile [ml-staging-codfw] 'sync' command on namespace 'ores-legacy' for release 'main' . [production]
15:24 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312', diff saved to https://phabricator.wikimedia.org/P47418 and previous config saved to /var/cache/conftool/dbconfig/20230503-152453-ladsgroup.json [production]
15:24 <eevans@cumin1001> START - Cookbook sre.cassandra.roll-restart for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
15:22 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3315', diff saved to https://phabricator.wikimedia.org/P47417 and previous config saved to /var/cache/conftool/dbconfig/20230503-152208-ladsgroup.json [production]
15:17 <eevans@cumin1001> END (PASS) - Cookbook sre.cassandra.roll-restart (exit_code=0) for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
15:16 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2178 (T335838)', diff saved to https://phabricator.wikimedia.org/P47416 and previous config saved to /var/cache/conftool/dbconfig/20230503-151627-ladsgroup.json [production]
15:10 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling db2178 (T335838)', diff saved to https://phabricator.wikimedia.org/P47415 and previous config saved to /var/cache/conftool/dbconfig/20230503-151013-ladsgroup.json [production]
15:10 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db2178.codfw.wmnet with reason: Maintenance [production]
15:09 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db2178.codfw.wmnet with reason: Maintenance [production]
15:09 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2157 (T335838)', diff saved to https://phabricator.wikimedia.org/P47414 and previous config saved to /var/cache/conftool/dbconfig/20230503-150947-ladsgroup.json [production]
15:09 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1170:3312 (T335838)', diff saved to https://phabricator.wikimedia.org/P47413 and previous config saved to /var/cache/conftool/dbconfig/20230503-150947-ladsgroup.json [production]
15:07 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2171:3315 (T335838)', diff saved to https://phabricator.wikimedia.org/P47412 and previous config saved to /var/cache/conftool/dbconfig/20230503-150702-ladsgroup.json [production]
15:07 <eevans@cumin1001> START - Cookbook sre.cassandra.roll-restart for nodes matching restbase1016.eqiad.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
15:03 <eevans@cumin1001> END (PASS) - Cookbook sre.cassandra.roll-restart (exit_code=0) for nodes matching restbase2012.codfw.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
15:01 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling db2171:3316 (T335838)', diff saved to https://phabricator.wikimedia.org/P47411 and previous config saved to /var/cache/conftool/dbconfig/20230503-150103-ladsgroup.json [production]
15:00 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling db2171:3315 (T335838)', diff saved to https://phabricator.wikimedia.org/P47410 and previous config saved to /var/cache/conftool/dbconfig/20230503-150042-ladsgroup.json [production]
15:00 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db2171.codfw.wmnet with reason: Maintenance [production]
15:00 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db2171.codfw.wmnet with reason: Maintenance [production]
15:00 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2169:3317 (T335838)', diff saved to https://phabricator.wikimedia.org/P47409 and previous config saved to /var/cache/conftool/dbconfig/20230503-150017-ladsgroup.json [production]
14:59 <sukhe> fix backup route for high-traffic2 in codfw: set routing-options static route 208.80.153.240/28 next-hop 10.192.17.7 [production]
14:54 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2157', diff saved to https://phabricator.wikimedia.org/P47408 and previous config saved to /var/cache/conftool/dbconfig/20230503-145440-ladsgroup.json [production]
14:54 <sukhe> [finished] homer "cr*-codfw*" commit "Gerrit: 914344 remove decommissioned host lvs2007": T335777 [production]
14:53 <eevans@cumin1001> START - Cookbook sre.cassandra.roll-restart for nodes matching restbase2012.codfw.wmnet: Upgrade Cassandra — T335383 - eevans@cumin1001 [production]
14:52 <sukhe> homer "cr*-codfw*" commit "Gerrit: 914344 remove decommissioned host lvs2007": T335777 [production]
14:46 <sukhe@cumin2002> END (PASS) - Cookbook sre.hosts.decommission (exit_code=0) for hosts lvs2007.codfw.wmnet [production]
14:46 <sukhe@cumin2002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
14:45 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2169:3317', diff saved to https://phabricator.wikimedia.org/P47407 and previous config saved to /var/cache/conftool/dbconfig/20230503-144511-ladsgroup.json [production]
14:45 <sukhe@cumin2002> START - Cookbook sre.dns.netbox [production]
14:43 <slyngshede@cumin1001> END (PASS) - Cookbook sre.ganeti.reboot-vm (exit_code=0) for VM idm1001.wikimedia.org [production]
14:42 <ottomata> Grant IdempotentWrite Kafka Cluster ACL to User:ANONYMOUS in kafka logging clusters - T334733 [production]
14:40 <jclark@cumin1001> END (FAIL) - Cookbook sre.dns.netbox (exit_code=99) [production]
14:40 <jclark@cumin1001> END (FAIL) - Cookbook sre.puppet.sync-netbox-hiera (exit_code=99) generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: update mgmt frav1003 - jclark@cumin1001" [production]
14:40 <slyngshede@cumin1001> START - Cookbook sre.ganeti.reboot-vm for VM idm1001.wikimedia.org [production]
14:39 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db2157', diff saved to https://phabricator.wikimedia.org/P47406 and previous config saved to /var/cache/conftool/dbconfig/20230503-143933-ladsgroup.json [production]
14:38 <jclark@cumin1001> START - Cookbook sre.puppet.sync-netbox-hiera generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: update mgmt frav1003 - jclark@cumin1001" [production]
14:36 <jclark@cumin1001> START - Cookbook sre.dns.netbox [production]