2022-01-25
18:00 <mwdebug-deploy@deploy1002> helmfile [codfw] DONE helmfile.d/services/mwdebug: sync on pinkunicorn [production]
17:59 <mwdebug-deploy@deploy1002> helmfile [codfw] START helmfile.d/services/mwdebug: apply on pinkunicorn [production]
17:59 <mwdebug-deploy@deploy1002> helmfile [eqiad] DONE helmfile.d/services/mwdebug: sync on pinkunicorn [production]
17:57 <mwdebug-deploy@deploy1002> helmfile [eqiad] START helmfile.d/services/mwdebug: apply on pinkunicorn [production]
17:47 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 40%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19212 and previous config saved to /var/cache/conftool/dbconfig/20220125-174659-root.json [production]
17:45 <ladsgroup@cumin1001> START - Cookbook sre.hosts.reimage for host es1028.eqiad.wmnet with OS bullseye [production]
17:31 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 25%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19211 and previous config saved to /var/cache/conftool/dbconfig/20220125-173156-root.json [production]
17:16 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 20%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19210 and previous config saved to /var/cache/conftool/dbconfig/20220125-171652-root.json [production]
17:02 <cwhite> upgrade elasticsearch-curator on apifeatureusage1001 [production]
17:01 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 10%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19209 and previous config saved to /var/cache/conftool/dbconfig/20220125-170148-root.json [production]
16:56 <mwdebug-deploy@deploy1002> helmfile [codfw] DONE helmfile.d/services/mwdebug: sync on pinkunicorn [production]
16:55 <mwdebug-deploy@deploy1002> helmfile [codfw] START helmfile.d/services/mwdebug: apply on pinkunicorn [production]
16:55 <mwdebug-deploy@deploy1002> helmfile [eqiad] DONE helmfile.d/services/mwdebug: sync on pinkunicorn [production]
16:52 <mwdebug-deploy@deploy1002> helmfile [eqiad] START helmfile.d/services/mwdebug: apply on pinkunicorn [production]
16:49 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Depooling es1028 (T299911)', diff saved to https://phabricator.wikimedia.org/P19208 and previous config saved to /var/cache/conftool/dbconfig/20220125-164900-ladsgroup.json [production]
16:48 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on es1028.eqiad.wmnet with reason: Maintenance [production]
16:48 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on es1028.eqiad.wmnet with reason: Maintenance [production]
16:46 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 5%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19207 and previous config saved to /var/cache/conftool/dbconfig/20220125-164645-root.json [production]
16:46 <taavi> deploy updated patch for T285116 [production]
16:43 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Make es1031 master of es3 T299911', diff saved to https://phabricator.wikimedia.org/P19206 and previous config saved to /var/cache/conftool/dbconfig/20220125-164324-ladsgroup.json [production]
16:41 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance es1034 (T299911)', diff saved to https://phabricator.wikimedia.org/P19204 and previous config saved to /var/cache/conftool/dbconfig/20220125-164118-ladsgroup.json [production]
16:37 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1148 (T285149)', diff saved to https://phabricator.wikimedia.org/P19203 and previous config saved to /var/cache/conftool/dbconfig/20220125-163721-marostegui.json [production]
16:31 <marostegui@cumin1001> dbctl commit (dc=all): 'es1022 (re)pooling @ 1%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19202 and previous config saved to /var/cache/conftool/dbconfig/20220125-163141-root.json [production]
16:30 <marostegui@cumin1001> dbctl commit (dc=all): 'es1026 (re)pooling @ 100%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19201 and previous config saved to /var/cache/conftool/dbconfig/20220125-163054-root.json [production]
16:26 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance es1034', diff saved to https://phabricator.wikimedia.org/P19200 and previous config saved to /var/cache/conftool/dbconfig/20220125-162613-ladsgroup.json [production]
16:22 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1148', diff saved to https://phabricator.wikimedia.org/P19199 and previous config saved to /var/cache/conftool/dbconfig/20220125-162217-marostegui.json [production]
16:21 <cmjohnson1> updating firmware ganeti1005 T299527 [production]
16:18 <cmjohnson1> updating firmware ganeti1014 T299527 [production]
16:15 <marostegui@cumin1001> dbctl commit (dc=all): 'es1026 (re)pooling @ 75%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19198 and previous config saved to /var/cache/conftool/dbconfig/20220125-161550-root.json [production]
16:11 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance es1034', diff saved to https://phabricator.wikimedia.org/P19197 and previous config saved to /var/cache/conftool/dbconfig/20220125-161108-ladsgroup.json [production]
16:07 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1148', diff saved to https://phabricator.wikimedia.org/P19196 and previous config saved to /var/cache/conftool/dbconfig/20220125-160712-marostegui.json [production]
16:06 <razzi@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on an-test-coord1001.eqiad.wmnet with reason: Still troubleshooting mariadb issues [production]
16:06 <razzi@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on an-test-coord1001.eqiad.wmnet with reason: Still troubleshooting mariadb issues [production]
16:05 <volans@cumin1001> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host es1022.eqiad.wmnet with OS bullseye [production]
16:05 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1181 (T299827)', diff saved to https://phabricator.wikimedia.org/P19195 and previous config saved to /var/cache/conftool/dbconfig/20220125-160522-marostegui.json [production]
16:00 <marostegui@cumin1001> dbctl commit (dc=all): 'es1026 (re)pooling @ 60%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19194 and previous config saved to /var/cache/conftool/dbconfig/20220125-160047-root.json [production]
15:56 <ladsgroup@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance es1034 (T299911)', diff saved to https://phabricator.wikimedia.org/P19193 and previous config saved to /var/cache/conftool/dbconfig/20220125-155604-ladsgroup.json [production]
15:52 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host es1034.eqiad.wmnet with OS bullseye [production]
15:52 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1148 (T285149)', diff saved to https://phabricator.wikimedia.org/P19192 and previous config saved to /var/cache/conftool/dbconfig/20220125-155207-marostegui.json [production]
15:51 <marostegui@cumin1001> dbctl commit (dc=all): 'Depooling db1148 (T285149)', diff saved to https://phabricator.wikimedia.org/P19191 and previous config saved to /var/cache/conftool/dbconfig/20220125-155101-marostegui.json [production]
15:51 <marostegui@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 6:00:00 on db1148.eqiad.wmnet with reason: Maintenance [production]
15:50 <marostegui@cumin1001> START - Cookbook sre.hosts.downtime for 6:00:00 on db1148.eqiad.wmnet with reason: Maintenance [production]
15:50 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1149 (T285149)', diff saved to https://phabricator.wikimedia.org/P19190 and previous config saved to /var/cache/conftool/dbconfig/20220125-155053-marostegui.json [production]
15:50 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1181', diff saved to https://phabricator.wikimedia.org/P19189 and previous config saved to /var/cache/conftool/dbconfig/20220125-155017-marostegui.json [production]
15:45 <marostegui@cumin1001> dbctl commit (dc=all): 'es1026 (re)pooling @ 50%: repooling after reimage', diff saved to https://phabricator.wikimedia.org/P19187 and previous config saved to /var/cache/conftool/dbconfig/20220125-154543-root.json [production]
15:38 <mmandere@cumin1001> END (PASS) - Cookbook sre.ganeti.makevm (exit_code=0) for new host ncredir6002.drmrs.wmnet [production]
15:35 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1149', diff saved to https://phabricator.wikimedia.org/P19186 and previous config saved to /var/cache/conftool/dbconfig/20220125-153548-marostegui.json [production]
15:35 <marostegui@cumin1001> dbctl commit (dc=all): 'Repooling after maintenance db1181', diff saved to https://phabricator.wikimedia.org/P19185 and previous config saved to /var/cache/conftool/dbconfig/20220125-153511-marostegui.json [production]
15:34 <volans@cumin1001> START - Cookbook sre.hosts.reimage for host es1022.eqiad.wmnet with OS bullseye [production]
15:32 <jelto@cumin1001> START - Cookbook sre.ganeti.makevm for new host gitlab-runner1001.eqiad.wmnet [production]