2023-01-24
ยง
|
13:11 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
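The recurring helmfile [staging] START/DONE lines in this log correspond to chart deploys run from the deployment host. A minimal sketch of the kind of invocation behind an entry like the one above, assuming the standard /srv/deployment-charts checkout (the path and the interactive flag are assumptions):

    # On the deployment host, from the service's helmfile directory (path assumed)
    cd /srv/deployment-charts/helmfile.d/services/datahub
    helmfile -e staging -i apply   # diff against the staging release, confirm, then sync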
13:10 <topranks> enabling tunnel services on cr2-eqdfw fpc 0 pic 1 [production]
13:08 <jmm@cumin2002> START - Cookbook sre.dns.netbox [production]
13:04 <jmm@cumin2002> START - Cookbook sre.hosts.decommission for hosts ping2002.codfw.wmnet [production]
12:56 <zabe@deploy1002> Finished scap: Backport for [[gerrit:881468|Remove PoolCounter from extension-list (T327336)]] (duration: 44m 09s) [production]
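This entry, together with the matching "Started scap" and testserver-sync entries further down, records the usual backport workflow: the Gerrit change is first synced to the mwdebug hosts for verification and then rolled out fleet-wide. A minimal sketch, assuming scap's backport subcommand is given the Gerrit change number:

    # On the deployment host; 881468 is the Gerrit change being backported
    scap backport 881468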
12:51 <elukey@deploy1002> helmfile [staging] DONE helmfile.d/services/changeprop: sync [production]
12:51 <elukey@deploy1002> helmfile [staging] START helmfile.d/services/changeprop: sync [production]
12:50 <mvernon@cumin2002> END (PASS) - Cookbook sre.swift.roll-restart-reboot-proxies (exit_code=0) rolling restart_daemons on A:eqiad and A:swift-fe or A:thanos-fe [production]
12:48 <XioNoX> restart ulsfo switches for network maintenance [production]
12:44 <ayounsi@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on 36 hosts with reason: network maintenance [production]
12:43 <ayounsi@cumin1001> START - Cookbook sre.hosts.downtime for 2:00:00 on 36 hosts with reason: network maintenance [production]
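These START/END pairs of sre.hosts.downtime bracket the ulsfo switch restarts logged above: the affected hosts are silenced in monitoring for the stated duration before the maintenance begins. A rough sketch of such an invocation; the flag names and the host alias are assumptions:

    # Silence 36 hosts for two hours ahead of the network maintenance (flags assumed)
    sudo cookbook sre.hosts.downtime --hours 2 -r 'network maintenance' 'A:some-alias'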
12:40 <mvernon@cumin2002> START - Cookbook sre.swift.roll-restart-reboot-proxies rolling restart_daemons on A:eqiad and A:swift-fe or A:thanos-fe [production]
12:38 <zabe@deploy1002> zabe: Backport for [[gerrit:881468|Remove PoolCounter from extension-list (T327336)]] synced to the testservers: mwdebug1002.eqiad.wmnet, mwdebug1001.eqiad.wmnet, mwdebug2002.codfw.wmnet, mwdebug2001.codfw.wmnet [production]
12:21 <hnowlan@puppetmaster1001> conftool action : set/pooled=yes; selector: name=thumbor2004.codfw.wmnet [production]
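Log lines of this "conftool action" form are emitted when a node is pooled or depooled via confctl. A sketch of the command that would produce the entry above, assuming the default node object type:

    # Repool thumbor2004 behind the load balancer
    sudo confctl select 'name=thumbor2004.codfw.wmnet' set/pooled=yes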
12:12 <zabe@deploy1002> Started scap: Backport for [[gerrit:881468|Remove PoolCounter from extension-list (T327336)]] [production]
11:54 <volans> uploaded python3-gjson_1.0.0 to apt.wikimedia.org bullseye-wikimedia,unstable-wikimedia [production]
11:49 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/datahub: sync on main [production]
11:42 <marostegui@cumin1001> dbctl commit (dc=all): 'db2165 (re)pooling @ 100%: After switchover', diff saved to https://phabricator.wikimedia.org/P43311 and previous config saved to /var/cache/conftool/dbconfig/20230124-114255-root.json [production]
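This dbctl entry and the matching 75%, 50% and 25% entries further down record the step-wise repooling of db2165 after the switchover; each step adjusts the instance's pooled percentage and commits the configuration, which is what produces the Phabricator diff paste. A rough sketch of one step; the exact flag spelling is an assumption:

    # Raise db2165 to 100% of its weight, then commit the change (flags assumed)
    sudo dbctl instance db2165 pool -p 100
    sudo dbctl config commit -m 'db2165 (re)pooling @ 100%: After switchover'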
11:39 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
11:36 <jelto@cumin1001> END (PASS) - Cookbook sre.gitlab.reboot-runner (exit_code=0) rolling reboot on A:gitlab-runner [production]
11:35 <jmm@cumin2002> END (PASS) - Cookbook sre.hosts.decommission (exit_code=0) for hosts ping3002.esams.wmnet [production]
11:35 <jmm@cumin2002> END (PASS) - Cookbook sre.dns.netbox (exit_code=0) [production]
11:34 <jmm@cumin2002> END (PASS) - Cookbook sre.puppet.sync-netbox-hiera (exit_code=0) generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: ping3002.esams.wmnet decommissioned, removing all IPs except the asset tag one - jmm@cumin2002" [production]
11:33 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/datahub: sync on main [production]
11:33 <jmm@cumin2002> START - Cookbook sre.puppet.sync-netbox-hiera generate netbox hiera data: "Triggered by cookbooks.sre.dns.netbox: ping3002.esams.wmnet decommissioned, removing all IPs except the asset tag one - jmm@cumin2002" [production]
11:28 <jmm@cumin2002> START - Cookbook sre.dns.netbox [production]
11:27 <marostegui@cumin1001> dbctl commit (dc=all): 'db2165 (re)pooling @ 75%: After switchover', diff saved to https://phabricator.wikimedia.org/P43310 and previous config saved to /var/cache/conftool/dbconfig/20230124-112750-root.json [production]
11:26 <zabe@deploy1002> Finished scap: Backport for [[gerrit:881467|Stop loading PoolCounter extension (T327336)]] (duration: 09m 19s) [production]
11:25 <marostegui@cumin1001> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host db1176.eqiad.wmnet with OS bullseye [production]
11:23 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
11:22 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/datahub: sync on main [production]
11:21 <jmm@cumin2002> START - Cookbook sre.hosts.decommission for hosts ping3002.esams.wmnet [production]
11:19 <zabe@deploy1002> zabe: Backport for [[gerrit:881467|Stop loading PoolCounter extension (T327336)]] synced to the testservers: mwdebug2001.codfw.wmnet, mwdebug1002.eqiad.wmnet, mwdebug1001.eqiad.wmnet, mwdebug2002.codfw.wmnet [production]
11:17 <zabe@deploy1002> Started scap: Backport for [[gerrit:881467|Stop loading PoolCounter extension (T327336)]] [production]
11:12 <marostegui@cumin1001> dbctl commit (dc=all): 'db2165 (re)pooling @ 50%: After switchover', diff saved to https://phabricator.wikimedia.org/P43308 and previous config saved to /var/cache/conftool/dbconfig/20230124-111245-root.json [production]
11:11 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
11:11 <btullis@deploy1002> helmfile [staging] DONE helmfile.d/services/datahub: sync on main [production]
11:08 <marostegui@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on db1176.eqiad.wmnet with reason: host reimage [production]
11:05 <marostegui@cumin1001> START - Cookbook sre.hosts.downtime for 2:00:00 on db1176.eqiad.wmnet with reason: host reimage [production]
11:03 <jiji@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=kartotherian,name=codfw [production]
11:03 <elukey@deploy1002> helmfile [staging] DONE helmfile.d/services/changeprop: sync [production]
11:03 <elukey@deploy1002> helmfile [staging] START helmfile.d/services/changeprop: sync [production]
11:02 <effie> depooling maps (kartotherian) from codfw, leaving eqiad as pooled [production]
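This entry and the conftool action at 11:03 above record depooling the kartotherian (maps) discovery record from codfw so that only eqiad serves traffic. A sketch of the corresponding confctl call; the object-type flag is an assumption based on the selector shown in the log:

    # Stop routing kartotherian discovery traffic to codfw
    sudo confctl --object-type discovery select 'dnsdisc=kartotherian,name=codfw' set/pooled=false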
11:00 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
10:59 <jelto@cumin1001> START - Cookbook sre.gitlab.reboot-runner rolling reboot on A:gitlab-runner [production]
10:58 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]
10:58 <elukey@deploy1002> helmfile [staging] DONE helmfile.d/services/changeprop: sync [production]
10:58 <elukey@deploy1002> helmfile [staging] START helmfile.d/services/changeprop: sync [production]
10:57 <marostegui@cumin1001> dbctl commit (dc=all): 'db2165 (re)pooling @ 25%: After switchover', diff saved to https://phabricator.wikimedia.org/P43306 and previous config saved to /var/cache/conftool/dbconfig/20230124-105740-root.json [production]
10:55 <btullis@deploy1002> helmfile [staging] START helmfile.d/services/datahub: apply on main [production]