2024-06-04
ยง
|
12:15 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/image-suggestion: apply [production]
12:14 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/image-suggestion: apply [production]
12:14 <ladsgroup@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db1246', diff saved to https://phabricator.wikimedia.org/P64005 and previous config saved to /var/cache/conftool/dbconfig/20240604-121415-ladsgroup.json [production]
12:14 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/image-suggestion: apply [production]
12:12 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/image-suggestion: apply [production]
12:12 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/image-suggestion: apply [production]
12:10 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 75%: Repooling', diff saved to https://phabricator.wikimedia.org/P64004 and previous config saved to /var/cache/conftool/dbconfig/20240604-121056-root.json [production]
12:08 <klausman@cumin2002> END (PASS) - Cookbook sre.cassandra.roll-reboot (exit_code=0) rolling reboot on A:ml-cache-codfw [production]
12:02 <taavi@cumin1002> START - Cookbook sre.wikireplicas.add-wiki for database dtpwiki (T365229) [production]
11:59 <ladsgroup@cumin1002> dbctl commit (dc=all): 'Repooling after maintenance db1246 (T352010)', diff saved to https://phabricator.wikimedia.org/P64003 and previous config saved to /var/cache/conftool/dbconfig/20240604-115907-ladsgroup.json [production]
11:55 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 50%: Repooling', diff saved to https://phabricator.wikimedia.org/P64002 and previous config saved to /var/cache/conftool/dbconfig/20240604-115549-root.json [production]
11:54 <hnowlan> depooling 3 api appservers and 2 appservers in advance of reimaging [production]
11:50 <klausman@cumin2002> START - Cookbook sre.cassandra.roll-reboot rolling reboot on A:ml-cache-eqiad [production]
11:44 <klausman@cumin2002> START - Cookbook sre.cassandra.roll-reboot rolling reboot on A:ml-cache-codfw [production]
11:41 <marostegui@cumin1002> dbctl commit (dc=all): 'Depooling db2136 (T364299)', diff saved to https://phabricator.wikimedia.org/P64001 and previous config saved to /var/cache/conftool/dbconfig/20240604-114157-marostegui.json [production]
11:41 <marostegui@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 6:00:00 on db2136.codfw.wmnet with reason: Maintenance [production]
11:41 <marostegui@cumin1002> START - Cookbook sre.hosts.downtime for 6:00:00 on db2136.codfw.wmnet with reason: Maintenance [production]
11:40 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 25%: Repooling', diff saved to https://phabricator.wikimedia.org/P64000 and previous config saved to /var/cache/conftool/dbconfig/20240604-114043-root.json [production]
11:39 <cgoubert@cumin1002> START - Cookbook sre.k8s.reboot-nodes rolling reboot on A:wikikube-worker-codfw [production]
11:39 <mvernon@cumin2002> END (PASS) - Cookbook sre.swift.roll-restart-reboot-swift-thanos-proxies (exit_code=0) rolling reboot on A:thanos-fe [production]
11:36 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host ganeti-test2003.codfw.wmnet [production]
11:29 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host ganeti-test2003.codfw.wmnet [production]
11:27 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host ganeti-test2001.codfw.wmnet [production]
11:25 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 10%: Repooling', diff saved to https://phabricator.wikimedia.org/P63999 and previous config saved to /var/cache/conftool/dbconfig/20240604-112537-root.json [production]
11:21 <jmm@cumin2002> START - Cookbook sre.hosts.reboot-single for host ganeti-test2001.codfw.wmnet [production]
11:15 <taavi@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host cloudlb2003-dev.codfw.wmnet [production]
11:10 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 5%: Repooling', diff saved to https://phabricator.wikimedia.org/P63998 and previous config saved to /var/cache/conftool/dbconfig/20240604-111031-root.json [production]
11:06 <taavi@cumin1002> START - Cookbook sre.hosts.reboot-single for host cloudlb2003-dev.codfw.wmnet [production]
11:06 <taavi@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host cloudlb2002-dev.codfw.wmnet [production]
11:06 <cgoubert@cumin1002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
11:04 <cgoubert@cumin1002> START - Cookbook sre.dns.netbox [production]
11:00 <cgoubert@cumin1002> END (FAIL) - Cookbook sre.hardware.upgrade-firmware (exit_code=99) upgrade firmware for hosts mw1358.eqiad.wmnet [production]
10:59 <cgoubert@cumin1002> START - Cookbook sre.hardware.upgrade-firmware upgrade firmware for hosts mw1358.eqiad.wmnet [production]
10:59 <taavi@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host cloudlb1002.eqiad.wmnet [production]
10:57 <taavi@cumin1002> START - Cookbook sre.hosts.reboot-single for host cloudlb2002-dev.codfw.wmnet [production]
10:57 <taavi@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host cloudlb2001-dev.codfw.wmnet [production]
10:55 <marostegui@cumin1002> dbctl commit (dc=all): 'db1156 (re)pooling @ 1%: Repooling', diff saved to https://phabricator.wikimedia.org/P63996 and previous config saved to /var/cache/conftool/dbconfig/20240604-105525-root.json [production]
10:54 <filippo@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host centrallog1002.eqiad.wmnet [production]
10:53 <cgoubert@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 7 days, 0:00:00 on mw1358.eqiad.wmnet with reason: Waiting on iDrac update [production]
10:53 <cgoubert@cumin1002> START - Cookbook sre.hosts.downtime for 7 days, 0:00:00 on mw1358.eqiad.wmnet with reason: Waiting on iDrac update [production]
10:50 <taavi@cumin1002> START - Cookbook sre.hosts.reboot-single for host cloudlb1002.eqiad.wmnet [production]
10:50 <taavi@cumin1002> END (PASS) - Cookbook sre.hosts.reboot-single (exit_code=0) for host cloudlb1001.eqiad.wmnet [production]
10:49 <taavi@cumin1002> START - Cookbook sre.hosts.reboot-single for host cloudlb2001-dev.codfw.wmnet [production]
10:48 <mvernon@cumin2002> START - Cookbook sre.swift.roll-restart-reboot-swift-thanos-proxies rolling reboot on A:thanos-fe [production]
10:46 <mvernon@cumin2002> END (PASS) - Cookbook sre.swift.roll-restart-reboot-swift-ms-proxies (exit_code=0) rolling reboot on P{ms-fe2*} and (A:swift-fe or A:swift-fe-canary or A:swift-fe-codfw or A:swift-fe-eqiad) [production]
10:45 <marostegui> dbmaint codfw s1 deploy schema change on db2203 T364299 [production]
10:45 <marostegui@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 10:00:00 on db2203.codfw.wmnet with reason: Long schema change [production]
10:45 <marostegui@cumin1002> START - Cookbook sre.hosts.downtime for 10:00:00 on db2203.codfw.wmnet with reason: Long schema change [production]
10:45 <marostegui@cumin1002> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 10:00:00 on db2141.codfw.wmnet with reason: Long schema change [production]
10:45 <marostegui@cumin1002> START - Cookbook sre.hosts.downtime for 10:00:00 on db2141.codfw.wmnet with reason: Long schema change [production]