2024-06-14
§
|
13:10 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server toolsbeta-sgebastion-05 |
[toolsbeta] |
13:09 |
<taavi@cloudcumin1001> |
END (PASS) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=0) for server toolsbeta-redis-1 |
[toolsbeta] |
13:08 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server toolsbeta-redis-1 |
[toolsbeta] |
13:05 |
<jynus> |
restart db1150, db1171 |
[production] |
13:04 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.cloudvirt.drain on host 'cloudvirt1034.eqiad.wmnet' |
[admin] |
13:02 |
<taavi@cloudcumin1001> |
END (PASS) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=0) for server taavi-xmltest |
[testlabs] |
13:01 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-xmltest |
[testlabs] |
12:59 |
<taavi@cloudcumin1001> |
END (FAIL) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=99) for server taavi-puppet7-server |
[testlabs] |
12:59 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-puppet7-server |
[testlabs] |
12:58 |
<taavi@cloudcumin1001> |
END (PASS) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=0) for server taavi-puppet7-server |
[testlabs] |
12:58 |
<bking@deploy1002> |
helmfile [dse-k8s-eqiad] DONE helmfile.d/admin 'apply'. |
[production] |
12:58 |
<bking@deploy1002> |
helmfile [dse-k8s-eqiad] START helmfile.d/admin 'apply'. |
[production] |
12:58 |
<mvernon@cumin2002> |
END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 2:00:00 on moss-be2001.codfw.wmnet with reason: host reimage |
[production] |
12:58 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-puppet7-server |
[testlabs] |
12:55 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db2178', diff saved to https://phabricator.wikimedia.org/P64967 and previous config saved to /var/cache/conftool/dbconfig/20240614-125543-marostegui.json |
[production] |
12:54 |
<mvernon@cumin2002> |
START - Cookbook sre.hosts.downtime for 2:00:00 on moss-be2001.codfw.wmnet with reason: host reimage |
[production] |
12:51 |
<mvernon@cumin2002> |
START - Cookbook sre.hosts.reimage for host moss-be2001.codfw.wmnet with OS bookworm |
[production] |
12:45 |
<jelto@cumin1002> |
END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 0:15:00 on gitlab2002.wikimedia.org with reason: GitLab upgrade |
[production] |
12:45 |
<jelto@cumin1002> |
START - Cookbook sre.hosts.downtime for 0:15:00 on gitlab2002.wikimedia.org with reason: GitLab upgrade |
[production] |
12:40 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db2178', diff saved to https://phabricator.wikimedia.org/P64966 and previous config saved to /var/cache/conftool/dbconfig/20240614-124036-marostegui.json |
[production] |
12:25 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db2178 (T367261)', diff saved to https://phabricator.wikimedia.org/P64964 and previous config saved to /var/cache/conftool/dbconfig/20240614-122530-marostegui.json |
[production] |
12:24 |
<taavi@cloudcumin1001> |
END (PASS) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=0) for server taavi-testpuppet-1 |
[testlabs] |
12:23 |
<mvernon@cumin2002> |
END (FAIL) - Cookbook sre.hosts.reimage (exit_code=93) for host moss-be2001.codfw.wmnet with OS bookworm |
[production] |
12:23 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-testpuppet-1 |
[testlabs] |
12:23 |
<taavi@cloudcumin1001> |
END (PASS) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=0) for server taavi-loki-1 |
[testlabs] |
12:22 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Depooling db2178 (T367261)', diff saved to https://phabricator.wikimedia.org/P64963 and previous config saved to /var/cache/conftool/dbconfig/20240614-122255-marostegui.json |
[production] |
12:22 |
<marostegui@cumin1002> |
END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 12:00:00 on db2178.codfw.wmnet with reason: Maintenance |
[production] |
12:22 |
<marostegui@cumin1002> |
START - Cookbook sre.hosts.downtime for 12:00:00 on db2178.codfw.wmnet with reason: Maintenance |
[production] |
12:22 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db2171 (T367261)', diff saved to https://phabricator.wikimedia.org/P64962 and previous config saved to /var/cache/conftool/dbconfig/20240614-122233-marostegui.json |
[production] |
12:22 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-loki-1 |
[testlabs] |
12:22 |
<ladsgroup@cumin1002> |
dbctl commit (dc=all): 'db1230 (re)pooling @ 100%: Maint over', diff saved to https://phabricator.wikimedia.org/P64961 and previous config saved to /var/cache/conftool/dbconfig/20240614-122210-ladsgroup.json |
[production] |
12:16 |
<taavi@cloudcumin1001> |
END (FAIL) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=99) for server 87975ac8-d625-44e7-a7db-c150d4430f45 |
[testlabs] |
12:15 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server 87975ac8-d625-44e7-a7db-c150d4430f45 |
[testlabs] |
12:11 |
<taavi@cloudcumin1001> |
END (FAIL) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=99) for server taavi-vagrant |
[testlabs] |
12:10 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-vagrant |
[testlabs] |
12:10 |
<taavi@cloudcumin1001> |
END (ERROR) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=97) for server taavi-vagrant |
[testlabs] |
12:10 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-vagrant |
[testlabs] |
12:09 |
<ladsgroup@cumin1002> |
END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 1 day, 0:00:00 on db1239.eqiad.wmnet with reason: Maintenance |
[production] |
12:09 |
<ladsgroup@cumin1002> |
START - Cookbook sre.hosts.downtime for 1 day, 0:00:00 on db1239.eqiad.wmnet with reason: Maintenance |
[production] |
12:09 |
<ladsgroup@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db1233 (T352010)', diff saved to https://phabricator.wikimedia.org/P64960 and previous config saved to /var/cache/conftool/dbconfig/20240614-120918-ladsgroup.json |
[production] |
12:09 |
<fnegri@cumin1002> |
END (PASS) - Cookbook sre.hosts.downtime (exit_code=0) for 5 days, 0:00:00 on clouddb1018.eqiad.wmnet with reason: hardware issues T367499 |
[production] |
12:08 |
<fnegri@cumin1002> |
START - Cookbook sre.hosts.downtime for 5 days, 0:00:00 on clouddb1018.eqiad.wmnet with reason: hardware issues T367499 |
[production] |
12:08 |
<fnegri@cumin1002> |
END (ERROR) - Cookbook sre.hosts.reboot-single (exit_code=97) for host clouddb1018.eqiad.wmnet |
[production] |
12:07 |
<marostegui@cumin1002> |
dbctl commit (dc=all): 'Repooling after maintenance db2171', diff saved to https://phabricator.wikimedia.org/P64959 and previous config saved to /var/cache/conftool/dbconfig/20240614-120727-marostegui.json |
[production] |
12:07 |
<ladsgroup@cumin1002> |
dbctl commit (dc=all): 'db1230 (re)pooling @ 75%: Maint over', diff saved to https://phabricator.wikimedia.org/P64958 and previous config saved to /var/cache/conftool/dbconfig/20240614-120704-ladsgroup.json |
[production] |
12:06 |
<taavi@cloudcumin1001> |
END (FAIL) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=99) for server taavi-hostkeys |
[testlabs] |
12:05 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-hostkeys |
[testlabs] |
12:03 |
<taavi@cloudcumin1001> |
END (FAIL) - Cookbook wmcs.openstack.migrate_server_to_ovs (exit_code=99) for server taavi-hostkeys |
[testlabs] |
12:03 |
<taavi@cloudcumin1001> |
START - Cookbook wmcs.openstack.migrate_server_to_ovs for server taavi-hostkeys |
[testlabs] |
12:01 |
<jelto@cumin1002> |
END (FAIL) - Cookbook sre.gitlab.upgrade (exit_code=99) on GitLab host gitlab2002.wikimedia.org with reason: GitLab to new version |
[production] |