2023-01-17
ยง
|
18:07 <jgiannelos@deploy1002> helmfile [codfw] DONE helmfile.d/services/proton: apply [production]
18:05 <jgiannelos@deploy1002> helmfile [codfw] START helmfile.d/services/proton: apply [production]
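Note: the START/DONE pair above is a single service deploy from the deployment host. A hedged sketch of roughly how such an apply is run, assuming the usual deployment-charts checkout path and that -e selects the target cluster:

    # on deploy1002 (path and flags assumed, not taken from this log)
    cd /srv/deployment-charts/helmfile.d/services/proton
    helmfile -e codfw -i apply   # -i shows the diff and asks for confirmation before applying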
18:01 <cgoubert@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=k8s-ingress-wikikube-rw,name=codfw [production]
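Note: the "conftool action" lines in this log are written automatically when confctl changes an object. The depool above corresponds roughly to the following sketch (exact flag names assumed):

    # on a cumin host; selector copied from the log line above
    confctl --object-type discovery select 'dnsdisc=k8s-ingress-wikikube-rw,name=codfw' set/pooled=false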
17:58 <jynus> restarted es5 codfw backup [production]
17:54 <bblack> authdns1001: restart confd [production]
17:27 <jiji@cumin1001> conftool action : set/pooled=true; selector: dnsdisc=aqs,name=codfw [production]
17:19 <effie> pooling back codfw services [production]
17:17 <bblack> removing errant 2620:0:860:118: IPs from primary interfaces of hosts in B2 [production]
17:01 <effie> restarting confd on deploy1002 [production]
16:59 <effie> pooling back depooled mw servers in codfw [production]
16:44 <btullis@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 30 days, 0:00:00 on an-worker1086.eqiad.wmnet with reason: Shutting down for RAID controller BBU replacement [production]
16:44 <btullis@cumin1001> START - Cookbook sre.hosts.downtime for 30 days, 0:00:00 on an-worker1086.eqiad.wmnet with reason: Shutting down for RAID controller BBU replacement [production]
16:32 <sukhe> reprepro --ignore=wrongdistribution -C main include bullseye-wikimedia cadvisor_0.44.0+ds1-1_amd64.changes: T325557 [production]
16:21 <ladsgroup@cumin1001> dbctl commit (dc=all): 'db1173 (re)pooling @ 100%: Maint over', diff saved to https://phabricator.wikimedia.org/P43179 and previous config saved to /var/cache/conftool/dbconfig/20230117-162100-ladsgroup.json [production]
16:05 <ladsgroup@cumin1001> dbctl commit (dc=all): 'db1173 (re)pooling @ 75%: Maint over', diff saved to https://phabricator.wikimedia.org/P43178 and previous config saved to /var/cache/conftool/dbconfig/20230117-160555-ladsgroup.json [production]
15:50 <ladsgroup@cumin1001> dbctl commit (dc=all): 'db1173 (re)pooling @ 25%: Maint over', diff saved to https://phabricator.wikimedia.org/P43177 and previous config saved to /var/cache/conftool/dbconfig/20230117-155050-ladsgroup.json [production]
15:35 <ladsgroup@cumin1001> dbctl commit (dc=all): 'db1173 (re)pooling @ 10%: Maint over', diff saved to https://phabricator.wikimedia.org/P43175 and previous config saved to /var/cache/conftool/dbconfig/20230117-153545-ladsgroup.json [production]
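Note: the four dbctl commits above are a staged repool of db1173 after maintenance (10% -> 25% -> 75% -> 100%). Done by hand, each step looks roughly like this sketch (percentages and commit message from the log; exact option names assumed):

    dbctl instance db1173 pool -p 10
    dbctl config commit -m 'db1173 (re)pooling @ 10%: Maint over'
    # repeat with -p 25, 75, 100 as replication lag stays healthy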
15:34 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:34 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:33 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 8:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:33 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 8:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:32 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:32 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:31 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:31 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 6:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:26 <ladsgroup@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
15:26 <ladsgroup@cumin1001> START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db1173.eqiad.wmnet with reason: Maintenance [production]
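Note: the repeated START/END pairs above are separate runs of the sre.hosts.downtime cookbook with different durations against the same host. A single run is roughly (hedged sketch; option names assumed, not taken from this log):

    sudo cookbook sre.hosts.downtime --hours 6 -r "Maintenance" 'db1173.eqiad.wmnet'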
14:56 <urandom> truncating hints for Cassandra nodes in codfw row b -- T327001 [production]
14:52 <urandom> disabling Cassandra hinted-handoff for codfw -- T327001 [production]
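Note: disabling hinted handoff and truncating stored hints are standard Cassandra nodetool operations; per node (or per instance, run across the affected codfw row B nodes, e.g. via cumin) they are roughly:

    nodetool disablehandoff   # stop storing new hints for unreachable replicas
    nodetool truncatehints    # drop hints already queued on this node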
14:27 <jgiannelos@deploy1002> helmfile [staging] DONE helmfile.d/services/proton: apply [production]
14:26 <jgiannelos@deploy1002> helmfile [staging] START helmfile.d/services/proton: apply [production]
14:12 <_joe_> try to restart cassandra-a on aqs2005 [production]
13:37 <jiji@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=recommendation-api,name=codfw [production]
13:35 <mvernon@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=thanos-query,name=codfw [production]
13:35 <mvernon@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=thanos-swift,name=codfw [production]
13:27 <jynus> restarting manually replication on es2020, may require data check afterwards [production]
13:26 <_joe_> depooling all services in codfw [production]
13:19 <oblivian@cumin1001> END (PASS) - Cookbook sre.discovery.service-route (exit_code=0) depool mobileapps in codfw: maintenance [production]
13:15 <mvernon@cumin1001> conftool action : set/pooled=false; selector: dnsdisc=swift,name=codfw [production]
13:14 <oblivian@cumin1001> START - Cookbook sre.discovery.service-route depool mobileapps in codfw: maintenance [production]
13:13 <oblivian@cumin1001> END (PASS) - Cookbook sre.discovery.service-route (exit_code=0) check citoid: maintenance [production]
13:13 <oblivian@cumin1001> START - Cookbook sre.discovery.service-route check citoid: maintenance [production]
13:08 <jelto@cumin1001> END (FAIL) - Cookbook sre.gitlab.upgrade (exit_code=99) [production]
13:01 <oblivian@puppetmaster1001> conftool action : set/pooled=false; selector: dnsdisc=restbase-async,name=codfw [production]
13:01 <oblivian@puppetmaster1001> conftool action : set/pooled=true; selector: dnsdisc=restbase-async,name=.* [production]
12:35 <jelto@cumin1001> START - Cookbook sre.gitlab.upgrade [production]
12:35 <moritzm> installing ipython security updates [production]
11:32 <jiji@cumin1001> END (PASS) - Cookbook sre.hosts.reimage (exit_code=0) for host mc1048.eqiad.wmnet with OS bullseye [production]
11:18 <jiji@cumin1001> END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on mc1048.eqiad.wmnet with reason: host reimage [production]
11:16 <jiji@cumin1001> START - Cookbook sre.hosts.downtime for 2:00:00 on mc1048.eqiad.wmnet with reason: host reimage [production]