## NOTE: namespaced keys (i.e. with ::) will NOT be looked up here
## See also https://phabricator.wikimedia.org/T209265
lookup_options:
abuse_networks:
merge: deep # Ease setting up a block on deployment-prep - T389181!
apereo_cas:
merge: deep
ldap:
merge: hash
profile::admin::groups:
merge: unique
"^profile::bgpalerter::(reports|monitors)$":
merge: unique
"^profile::pki::(.*)::db_pass$":
convert_to: 'Sensitive'
'^profile::mail::mx::[\w_]+_password$':
convert_to: 'Sensitive'
'^profile::mail::vrts::[\w_]+_password$':
convert_to: 'Sensitive'
profile::pki::client::auth_key:
convert_to: 'Sensitive'
profile::cache::varnish::frontend::runtime_params:
merge: deep
profile::cache::varnish::frontend::fe_vcl_config:
merge: hash
profile::cache::varnish::frontend::cache_be_opts:
merge: hash
profile::conftool2git::ssh_privkey:
convert_to: 'Sensitive'
profile::docker::engine::settings:
merge: hash
profile::debdeploy::client::filter_services:
merge: hash
labsldapconfig:
merge: hash
profile::idp::services:
merge: deep
profile::spicerack::alertmanager_config_data:
merge: deep
profile::spicerack::peeringdb_config_data:
merge: hash
profile::spicerack::mysql_config_data:
merge: hash
profile::statograph::api_key:
convert_to: 'Sensitive'
profile::statograph::page_id:
convert_to: 'Sensitive'
profile::ceph::auth::load_all::configuration:
merge: deep
profile::ceph::auth::deploy::configuration:
merge: deep
profile::cloudceph::auth::load_all::configuration:
merge: deep
profile::cloudceph::auth::deploy::configuration:
merge: deep
profile::wmcs::firewall::blocked_ips:
merge: unique
profile::gitlab::omniauth_providers:
merge: deep
mediabackup:
merge: hash
profile::query_service::blazegraph_extra_jvm_opts:
merge: unique
profile::opensearch::common_settings:
merge:
strategy: deep
profile::thanos::oidc::client_secret:
convert_to: 'Sensitive'
profile::thanos::oidc::cookie_secret:
convert_to: 'Sensitive'
profile::opensearch::dashboards::httpd_proxy::sso_client_secret:
convert_to: 'Sensitive'
profile::opensearch::dashboards::httpd_proxy::sso_cookie_secret:
convert_to: 'Sensitive'
profile::prometheus::web_idp::oidc_client_secret:
convert_to: 'Sensitive'
profile::prometheus::web_idp::oidc_cookie_secret:
convert_to: 'Sensitive'
profile::hadoop::common::config_override:
merge: deep
profile::cache::haproxykafka::kafka:
merge: deep
profile::liberica::bgp_config:
merge: hash
profile::acme_chief::certificates:
merge: deep
profile::pontoon::sssd_filter_users:
merge: deep
profile::pontoon::sssd_filter_groups:
merge: deep
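# A sketch of how these lookup options behave (illustrative, not exhaustive):
# - merge: unique   flattens array values from all hierarchy layers and
#                   removes duplicates.
# - merge: hash     merges hash values key-by-key at the top level, with the
#                   higher-priority layer winning per key.
# - merge: deep     like hash, but recursing into nested hashes.
# - convert_to: 'Sensitive' wraps the looked-up value in Puppet's Sensitive
#   type, so consuming code has to unwrap it explicitly, e.g.:
#     $key = lookup('profile::pki::client::auth_key')  # Sensitive[String]
#     $plain = $key.unwrap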
# General variables that once would have been in realm.pp
cluster: misc
# TODO: move all of these directly under P:monitoring::host
contactgroups: 'admins'
do_paging: true
nagios_group: "%{lookup('cluster')}_%{::site}"
datacenters:
- eqiad
- codfw
- esams
- ulsfo
- eqsin
- drmrs
- magru
enable_nrpe2nodexp: false
# This is used for setting the resolv.conf on the DNS hosts and the NTP peers
# that are consumed by P:systemd::timesyncd.
site_nearest_core:
eqiad: codfw
codfw: eqiad
esams: eqiad
ulsfo: codfw
eqsin: codfw
drmrs: eqiad
magru: eqiad # TODO change depending on real latency
public_domain: 'wikimedia.org'
# Swift cluster metadata
# The keys here are called "cluster_label" in puppet code
# This is an instance of the Swift::Clusters type
swift_clusters:
codfw:
cluster_name: 'codfw-prod'
ring_manager: 'ms-fe2009.codfw.wmnet'
rclone_host: 'ms-be2069.codfw.wmnet'
expirer_host: 'ms-be2068.codfw.wmnet'
eqiad:
cluster_name: 'eqiad-prod'
ring_manager: 'ms-fe1009.eqiad.wmnet'
rclone_host: 'ms-be1069.eqiad.wmnet'
expirer_host: 'ms-be1068.eqiad.wmnet'
thanos:
cluster_name: 'thanos-prod'
ring_manager: 'thanos-fe1004.eqiad.wmnet'
# Main statsd instance
statsd: statsd.eqiad.wmnet:8125
statsd_exporter_port: 9125
# Debmonitor instance
debmonitor_servers:
- debmonitor.discovery.wmnet
alertmanagers:
- alert1002.wikimedia.org
- alert2002.wikimedia.org
# NOTE: Do *not* add new clusters *per site* anymore,
# the site name will automatically be appended now,
# and a different IP prefix will be used.
wikimedia_clusters:
decommissioned:
description: "Decommissioned servers"
sites: {}
insetup:
description: "Servers being set up"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
lvs:
description: "LVS loadbalancers"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
puppet:
description: "Puppetmasters"
sites:
eqiad: []
codfw: []
search:
description: "Search"
sites: {}
mysql:
description: "MySQL"
sites:
eqiad: []
codfw: []
etcd:
description: "Etcd"
sites:
eqiad: []
codfw: []
kafka_main:
description: "Kafka main cluster"
sites:
eqiad: []
codfw: []
misc:
description: "Miscellaneous"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
kubernetes:
description: "Kubernetes"
sites:
eqiad: []
codfw: []
appserver:
description: "Application servers"
sites:
eqiad: []
codfw: []
api_appserver:
description: "API application servers"
sites:
eqiad: []
codfw: []
cache_text:
description: "Text caches"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
cache_upload:
description: "Upload caches"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
payments:
description: "Fundraiser payments"
sites: {}
ssl:
description: "SSL cluster"
sites: {}
swift:
description: "Swift"
sites:
eqiad: []
codfw: []
esams: []
jobrunner:
description: "Jobrunners"
sites:
eqiad: []
codfw: []
analytics:
description: "Analytics cluster"
sites:
eqiad: []
flink:
description: "Zookeeper cluster for Flink HA"
sites:
eqiad: []
codfw: []
memcached:
description: "Memcached"
sites:
eqiad: []
codfw: []
memcached_gutter:
description: "Memcached gutter pool"
sites:
codfw: []
eqiad: []
fundraising:
description: "Fundraising"
sites:
eqiad:
- pay-lvs1001.frack.eqiad.wmnet
- pay-lvs1002.frack.eqiad.wmnet
ceph: # Not used anymore
description: "Ceph"
sites: {}
parsoid:
description: "Parsoid"
sites:
eqiad: []
codfw: []
redis:
description: "Redis"
sites:
eqiad: []
codfw: []
elasticsearch:
description: "Elasticsearch cluster"
sites:
eqiad: []
codfw: []
logstash:
description: "Logstash cluster"
sites:
eqiad: []
codfw: []
apifeatureusage:
description: "API feature usage iog ingest cluster"
sites:
eqiad: []
codfw: []
restbase:
description: "Restbase"
sites:
eqiad: []
codfw: []
wdqs:
description: "Wikidata Query Service - Public cluster"
sites:
eqiad: []
codfw: []
maps:
description: "Maps Cluster"
sites:
eqiad: []
codfw: []
ganeti:
description: "Ganeti Virt cluster"
sites:
eqiad: []
codfw: []
ulsfo: []
esams: []
eqsin: []
drmrs: []
magru: []
aqs:
description: "Analytics Query Service"
sites:
eqiad: []
codfw: []
restbase_test:
description: "Restbase test"
sites:
eqiad: []
codfw: []
relforge:
description: "Elasticsearch relforge cluster"
sites:
eqiad: []
thumbor:
description: "Thumbor"
sites:
codfw: []
eqiad: []
kafka_jumbo:
description: "Kafka Jumbo Cluster"
sites:
eqiad: []
druid_analytics:
description: "Druid Analytics Cluster"
sites:
eqiad: []
druid_public:
description: "Druid Public Cluster"
sites:
eqiad: []
wdqs-internal-main:
description: "Wikidata Query Service internal - main graph"
sites:
eqiad: []
codfw: []
wdqs-internal-scholarly:
description: "Wikidata Query Service internal - scholarly graph"
sites:
eqiad: []
codfw: []
wdqs-legacy-full:
description: "Wikidata Query Service - Full cluster (legacy)"
sites:
codfw: []
wdqs-test:
description: "Wikidata Query Service - Test cluster"
sites:
eqiad: []
codfw: []
wdqs-main:
description: "Wikidata Query Service - Main cluster"
sites:
eqiad: []
codfw: []
wdqs-scholarly:
description: "Wikidata Query Service - Scholarly cluster"
sites:
eqiad: []
codfw: []
wdqs-test-alternatives:
description: "Wikidata Query Service - test alternatives cluster"
sites:
eqiad: []
codfw: []
dnsbox:
description: "DNS and NTP Combo Infra Boxes"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
spare:
description: "Spare servers"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
prometheus:
description: "Prometheus servers"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
graphite:
description: "Graphite servers"
sites:
eqiad: []
codfw: []
bastion:
description: "Bastion servers"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
alerting:
description: "Icinga"
sites:
eqiad: []
codfw: []
ci:
description: "Continuous Integration servers"
sites:
eqiad: []
codfw: []
management:
description: "Management servers"
sites:
eqiad: []
codfw: []
cloud_management:
description: "Management for WMCS servers"
sites:
eqiad: []
codfw: []
wmcs:
description: "WMCS servers"
sites:
eqiad: []
codfw: []
webperf:
description: "Web Performance servers"
sites:
eqiad: []
codfw: []
poolcounter:
description: "Poolcounter servers"
sites:
eqiad: []
codfw: []
syslog:
description: "Syslog servers"
sites:
eqiad: []
codfw: []
druid_test_analytics:
description: "Druid Analytics Test Cluster"
sites:
eqiad: []
sessionstore:
description: "Sessionstore cluster"
sites:
eqiad: []
codfw: []
cloudelastic:
description: "Elasticsearch cloudelastic cluster"
sites:
eqiad: []
acmechief:
description: "acme-chief hosts"
sites:
eqiad: []
codfw: []
eventschemas:
description: "Event platform schemas"
sites:
eqiad: []
codfw: []
thanos:
description: "Prometheus long-term storage"
sites:
eqiad: []
codfw: []
titan:
description: "Titan hosts Thanos components"
sites:
eqiad: []
codfw: []
kafka_test:
description: "Kafka Test Cluster"
sites:
eqiad: []
zookeeper_test:
description: "Zookeeper Test Cluster"
sites:
eqiad: []
ml_serve:
description: "ML Team serving clusters"
sites:
eqiad: []
codfw: []
ml_etcd:
description: "ML Team etcd clusters"
sites:
eqiad: []
codfw: []
pki:
description: "PKI (cfssl) infrastructure"
sites:
eqiad: []
codfw: []
ganeti_test:
description: "Ganeti Virt cluster test environment"
sites:
eqiad: []
codfw: []
backup:
description: "Backup cluster (bacula, databases and media)"
sites:
eqiad: []
codfw: []
wcqs:
description: "Wikimedia Commons Query Service - Public cluster"
sites:
eqiad: []
codfw: []
wikidough:
description: "DoH and DoT recursive resolver"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
durum:
description: "Wikidough check service"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
kubernetes-staging:
description: "Kubernetes staging"
sites:
eqiad: []
codfw: []
ml_staging_etcd:
description: "ML Team staging etcd clusters"
sites:
codfw: []
ml_staging:
description: "ML Team staging clusters"
sites:
codfw: []
ml_cache:
description: "ML Team cache and Feature Store clusters"
sites:
eqiad: []
codfw: []
dse_k8s:
description: "Kubernetes cluster for Data Science and Engineering (DSE) workloads"
sites:
eqiad: []
codfw: []
dse_k8s_etcd:
description: "Etcd cluster for the DSE Kubernetes cluster"
sites:
eqiad: []
codfw: []
aux_k8s_etcd:
description: "etcd cluster for aux kubernetes cluster"
sites:
eqiad: []
codfw: []
aux-k8s:
description: "control-plane cluster for aux kubernetes cluster"
sites:
eqiad: []
codfw: []
aux-k8s-workers:
description: "workers for aux kubernetes cluster"
sites:
eqiad: []
codfw: []
cassandra-dev:
description: "Cassandra dev & test cluster"
sites:
codfw: []
ncredir:
description: "non-canonical domain redirect service cluster"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
grafana:
description: "Grafana monitoring"
sites:
eqiad: []
codfw: []
cephosd:
description: "Ceph clusters providing services to the Data Platform teams"
sites:
codfw: []
eqiad: []
datahubsearch:
description: "OpenSearch cluster that services DataHub"
sites:
eqiad: []
airflow:
description: "Airflow instances and related database servers"
sites:
eqiad: []
presto:
description: "The Presto cluster used for querying Hive"
sites:
eqiad: []
apus:
description: "Apus, a replicated S3 service running on Ceph"
sites:
codfw: []
eqiad: []
liberica:
description: "Liberica loadbalancers"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
hcaptcha:
description: "hcaptcha proxy"
sites:
eqiad: []
codfw: []
esams: []
ulsfo: []
eqsin: []
drmrs: []
magru: []
puppetmaster: "puppet"
puppet_ca_server: puppetmaster1001.eqiad.wmnet
puppet_merge_server: puppetserver1001.eqiad.wmnet
# This list is mostly maintained for rsync hosts_allow.
# Servers listed here can rsync pull from each other.
statistics_servers:
- stat1008.eqiad.wmnet
- stat1009.eqiad.wmnet
- stat1010.eqiad.wmnet
- stat1011.eqiad.wmnet
- clouddumps1001.wikimedia.org
- clouddumps1002.wikimedia.org
- an-launcher1003.eqiad.wmnet
- an-test-client1002.eqiad.wmnet
# Dumps distribution servers actively serving NFS traffic
dumps_dist_nfs_servers: [clouddumps1001.wikimedia.org, clouddumps1002.wikimedia.org]
# Dumps distribution server currently serving traffic over NFS to cloud vps instances
dumps_dist_active_vps: clouddumps1001.wikimedia.org
# Dumps distribution server currently serving web and rsync mirror traffic
# Also serves stat* hosts over NFS
dumps_dist_active_web: clouddumps1002.wikimedia.org
# List of all zookeeper clusters in production.
# The number after each hostname represents the host's zkid
# within Zookeeper
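# e.g. conf1007.eqiad.wmnet below gets myid 1107 and a
# server.1107=conf1007.eqiad.wmnet:2888:3888 entry in zoo.cfg (2888/3888 are
# the ZooKeeper defaults, shown here purely for illustration).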
zookeeper_clusters:
main-eqiad:
hosts:
conf1007.eqiad.wmnet: '1107'
conf1008.eqiad.wmnet: '1108'
conf1009.eqiad.wmnet: '1109'
main-codfw:
hosts:
conf2004.codfw.wmnet: '2001'
conf2005.codfw.wmnet: '2002'
conf2006.codfw.wmnet: '2003'
# ZK clusters for Flink HA T341792
flink-eqiad:
hosts:
flink-zk1001.eqiad.wmnet: '1001'
flink-zk1002.eqiad.wmnet: '1002'
flink-zk1003.eqiad.wmnet: '1003'
flink-codfw:
hosts:
flink-zk2001.codfw.wmnet: '2001'
flink-zk2002.codfw.wmnet: '2002'
flink-zk2003.codfw.wmnet: '2003'
# ZK cluster for Druid analytics-eqiad cluster (non public),
# colocated on druid hosts.
druid-analytics-eqiad:
hosts:
an-druid1003.eqiad.wmnet: '1003'
an-druid1004.eqiad.wmnet: '1004'
an-druid1005.eqiad.wmnet: '1005'
# ZK cluster for Druid public-eqiad cluster, (for wikistats, etc.)
# colocated on druid hosts.
druid-public-eqiad:
hosts:
druid1009.eqiad.wmnet: '1009'
druid1010.eqiad.wmnet: '1010'
druid1011.eqiad.wmnet: '1011'
# ZK cluster for Druid analytics-test-eqiad cluster (non public),
  # colocated on druid hosts. This is mainly used as a testing
  # environment for the Hadoop Test cluster jobs.
# Temporarily removed due to hw refresh of the hadoop test cluster.
druid-analytics-test-eqiad:
hosts:
an-test-druid1001.eqiad.wmnet: '1001'
# ZK Cluster dedicated to the Hadoop cluster (and its satellite systems)
analytics-eqiad:
hosts:
an-conf1004.eqiad.wmnet: '1004'
an-conf1005.eqiad.wmnet: '1005'
an-conf1006.eqiad.wmnet: '1006'
# Test ZK cluster
test-eqiad:
hosts:
zookeeper-test1002.eqiad.wmnet: '1002'
# Zuul/CI ZK cluster
# https://phabricator.wikimedia.org/project/view/7592/
zuul-eqiad:
hosts:
zuul1001.eqiad.wmnet: '1001'
zuul-codfw:
hosts:
zuul2001.codfw.wmnet: '2001'
# Zuul/CI node names
zuul_main_nodes:
- zuul1001.eqiad.wmnet
- zuul2001.codfw.wmnet
zuul_executor_nodes:
- zuul1002.eqiad.wmnet
- zuul2002.codfw.wmnet
# Used to sync the setting between all Kafka clusters and clients.
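# 4194304 bytes = 4 MiB; this feeds the broker-side message.max.bytes and the
# matching client-side limits (exact option names depend on the consuming
# profiles).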
kafka_message_max_bytes: 4194304
kafka_clusters:
main-eqiad:
zookeeper_cluster_name: main-eqiad
brokers:
kafka-main1006.eqiad.wmnet:
id: 1001
rack: A
kafka-main1007.eqiad.wmnet:
id: 1002
rack: B
kafka-main1008.eqiad.wmnet:
id: 1003
rack: C
kafka-main1009.eqiad.wmnet:
id: 1004
rack: D
kafka-main1010.eqiad.wmnet:
id: 1005
rack: D
main-codfw:
zookeeper_cluster_name: main-codfw
brokers:
kafka-main2006.codfw.wmnet:
id: 2001
rack: A
kafka-main2007.codfw.wmnet:
id: 2002
rack: B
kafka-main2008.codfw.wmnet:
id: 2003
rack: C
kafka-main2009.codfw.wmnet:
id: 2004
rack: D
kafka-main2010.codfw.wmnet:
id: 2005
rack: D
# NOTE: The 'rack' here is used by the confluent kafka module
# to assign broker.rack for Kafka rack awareness. We are actually setting
  # the row letter, not the full row-rack number, since each of these brokers
  # is in a different rack anyway. We do awareness at the row level.
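  # e.g. with the data below, kafka-jumbo1010 would end up with broker.rack=E
  # in its rendered server.properties (a sketch, not taken from the template).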
jumbo-eqiad:
zookeeper_cluster_name: main-eqiad
brokers:
kafka-jumbo1010.eqiad.wmnet:
id: 1010
rack: E
kafka-jumbo1011.eqiad.wmnet:
id: 1011
rack: E
kafka-jumbo1012.eqiad.wmnet:
id: 1012
rack: E
kafka-jumbo1013.eqiad.wmnet:
id: 1013
rack: F
kafka-jumbo1014.eqiad.wmnet:
id: 1014
rack: F
kafka-jumbo1015.eqiad.wmnet:
id: 1015
rack: F
kafka-jumbo1016.eqiad.wmnet:
id: 1016
rack: E
kafka-jumbo1017.eqiad.wmnet:
id: 1017
rack: F
kafka-jumbo1018.eqiad.wmnet:
id: 1018
rack: A
# Kafka clusters for logs, see also T206454
logging-eqiad:
zookeeper_cluster_name: main-eqiad
brokers:
kafka-logging1001.eqiad.wmnet:
id: 1001
rack: B
kafka-logging1002.eqiad.wmnet:
id: 1002
rack: C
kafka-logging1003.eqiad.wmnet:
id: 1006
rack: D
kafka-logging1004.eqiad.wmnet:
id: 1004
rack: E
kafka-logging1005.eqiad.wmnet:
id: 1005
rack: F
logging-codfw:
zookeeper_cluster_name: main-codfw
brokers:
kafka-logging2001.codfw.wmnet:
id: 2001
rack: A
kafka-logging2002.codfw.wmnet:
id: 2002
rack: C
kafka-logging2003.codfw.wmnet:
id: 2003
rack: D
kafka-logging2004.codfw.wmnet:
id: 2004
rack: B
kafka-logging2005.codfw.wmnet:
id: 2005
rack: D
test-eqiad:
zookeeper_cluster_name: test-eqiad
brokers:
kafka-test1006.eqiad.wmnet:
id: 1006
kafka-test1007.eqiad.wmnet:
id: 1007
kafka-test1008.eqiad.wmnet:
id: 1008
kafka-test1009.eqiad.wmnet:
id: 1009
kafka-test1010.eqiad.wmnet:
id: 1010
# Hive base configuration is common to multiple profiles, and must be kept
# in sync. Instead of having it repeated multiple times, it is convenient to
# have a single place in Hiera to check/modify.
hive_services:
analytics-test-hive:
server_host: 'analytics-test-hive.eqiad.wmnet'
server_port: 10000
# Please note that this value is overridden by the coordinators to force
# them to use their local Metastore. Check the coordinators' role for more info.
metastore_host: 'analytics-test-hive.eqiad.wmnet'
metastore_jdbc_host: 'an-test-coord1001.eqiad.wmnet'
metastore_sasl_enabled: true
metastore_kerberos_principal: 'hive/analytics-test-hive.eqiad.wmnet@WIKIMEDIA'
server_authentication: 'KERBEROS'
metastore_kerberos_keytab_file: '/etc/security/keytabs/hive/hive.keytab'
server_authentication_kerberos_principal: 'hive/analytics-test-hive.eqiad.wmnet@WIKIMEDIA'
server_authentication_kerberos_keytab: '/etc/security/keytabs/hive/hive.keytab'
hive_metastore_disallow_incompatible_col_type_changes: false
java_home: '/usr/lib/jvm/java-8-openjdk-amd64/jre'
metastore_opts: '-Xms16g -Xmx16g -XX:+UseG1GC -XX:+UseStringDeduplication -XX:MaxGCPauseMillis=200 -Djava.net.preferIPv4Stack=false -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=15 -XX:-PrintCommandLineFlags -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+PrintTenuringDistribution -XX:+UseGCLogFileRotation -Xloggc:/var/log/hive/gc-metastore.log -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:9183:/etc/prometheus/hive_metastore_jmx_exporter.yaml'
server_opts: '-Xms6g -Xmx6g -XX:+UseG1GC -XX:+UseStringDeduplication -XX:MaxGCPauseMillis=1000 -Djava.net.preferIPv4Stack=false -XX:+ParallelRefProcEnabled -XX:MaxMetaspaceSize=1024m -XX:-PrintCommandLineFlags -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+PrintTenuringDistribution -XX:+UseGCLogFileRotation -Xloggc:/var/log/hive/gc-hiveserver.log -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/hive/hiveserver-heap-dump.bin -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10100:/etc/prometheus/hive_server_jmx_exporter.yaml'
# this allows us to do SHOW LOCKS - See #T386854 for details
support_concurrency: true
hive_site_extra_properties:
hive.txn.manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
      # This enables an always-running daemon, as part of the Hive Metastore
      # process, that removes deadlocked/expired locks from the HIVE_LOCKS table.
# It also enables other Hive services that we do not need, but this is the best we can do with Hive 2.3.6.
hive.compactor.initiator.on: true
datanucleus.connectionPool.maxPoolSize: 50 # The default in hive version 2.3.6 is 10
hive.metastore.server.max.threads: 500 # The default in hive version 2.3.6 is 10
analytics-hive:
server_host: analytics-hive.eqiad.wmnet
server_port: 10000
# Please note that this value is overridden by the coordinators to force
# them to use their local Metastore. Check the coordinators' role for more info.
metastore_host: 'analytics-hive.eqiad.wmnet'
metastore_jdbc_host: 'an-mariadb1001.eqiad.wmnet'
metastore_sasl_enabled: true
metastore_kerberos_keytab_file: '/etc/security/keytabs/hive/hive.keytab'
metastore_kerberos_principal: 'hive/analytics-hive.eqiad.wmnet@WIKIMEDIA'
server_authentication: 'KERBEROS'
server_authentication_kerberos_principal: 'hive/analytics-hive.eqiad.wmnet@WIKIMEDIA'
server_authentication_kerberos_keytab: '/etc/security/keytabs/hive/hive.keytab'
hive_metastore_disallow_incompatible_col_type_changes: false
java_home: '/usr/lib/jvm/java-8-openjdk-amd64/jre'
metastore_opts: '-Xms16g -Xmx16g -XX:+UseG1GC -XX:+UseStringDeduplication -XX:MaxGCPauseMillis=200 -Djava.net.preferIPv4Stack=false -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=15 -XX:-PrintCommandLineFlags -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+PrintTenuringDistribution -XX:+UseGCLogFileRotation -Xloggc:/var/log/hive/gc-metastore.log -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:9183:/etc/prometheus/hive_metastore_jmx_exporter.yaml'
server_opts: '-Xms10g -Xmx10g -XX:+UseG1GC -XX:+UseStringDeduplication -XX:MaxGCPauseMillis=1000 -Djava.net.preferIPv4Stack=false -XX:-PrintCommandLineFlags -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+PrintTenuringDistribution -XX:+UseGCLogFileRotation -Xloggc:/var/log/hive/gc-hiveserver.log -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=10m -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10100:/etc/prometheus/hive_server_jmx_exporter.yaml'
# this allows us to do SHOW LOCKS - See #T386854 for details
support_concurrency: true
hive_site_extra_properties:
hive.txn.manager: org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
      # This enables an always-running daemon, as part of the Hive Metastore
      # process, that removes deadlocked/expired locks from the HIVE_LOCKS table.
# It also enables other Hive services that we do not need, but this is the best we can do with Hive 2.3.6.
hive.compactor.initiator.on: true
datanucleus.connectionPool.maxPoolSize: 50 # The default in hive version 2.3.6 is 10
hive.metastore.server.max.threads: 500 # The default in hive version 2.3.6 is 10
# Hadoop base configuration is common to multiple profiles, and must be kept
# in sync. Instead of having it repeated multiple times, it is convenient to
# have a single place in Hiera to check/modify.
hadoop_clusters:
analytics-test-hadoop:
zookeeper_cluster_name: analytics-eqiad
resourcemanager_hosts:
- an-test-master1001.eqiad.wmnet
- an-test-master1002.eqiad.wmnet
namenode_hosts:
- an-test-master1001.eqiad.wmnet
- an-test-master1002.eqiad.wmnet
journalnode_hosts:
- an-test-worker1001.eqiad.wmnet
- an-test-worker1002.eqiad.wmnet
- an-test-worker1003.eqiad.wmnet
net_topology:
an-test-worker1001.eqiad.wmnet: /eqiad/A/5
an-test-worker1002.eqiad.wmnet: /eqiad/C/5
an-test-worker1003.eqiad.wmnet: /eqiad/D/6
# The following hosts are excluded from HDFS and YARN. The values are also used to determine
# whether or not to start the hadoop-yarn-nodemanager service.
excluded_hosts: []
# https://community.hortonworks.com/articles/43839/scaling-the-hdfs-namenode-part-2.html
# 20 * log2(Cluster Size)
dfs_namenode_handler_count: 20
dfs_namenode_service_port: 8040
dfs_namenode_service_handler_count: 10
hadoop_var_directory: '/srv/hadoop'
yarn_scheduler_maximum_allocation_mb: 53248
yarn_resourcemanager_zk_state_store_parent_path: '/yarn-rmstore/analytics-test-hadoop'
yarn_resourcemanager_max_completed_applications: 1000
# yarn_nodemanager_resource_memory_mb is set using the formula:
# total-memory - yarn_nodemanager_os_reserved_memory_mb
    # The reserved memory covers the co-located daemons: datanode (4G) +
    # nodemanager (6G) + journalnode (4G, on journalnode hosts) = 14G.
yarn_nodemanager_os_reserved_memory_mb: 14000
# Requires the Capacity scheduler to work
yarn_node_labels_enabled: true
hadoop_datanode_opts: "-Xms4096m -Xmx4096m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:51010:/etc/prometheus/hdfs_datanode_jmx_exporter.yaml"
hadoop_journalnode_opts: "-Xms4096m -Xmx4096m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10485:/etc/prometheus/hdfs_journalnode_jmx_exporter.yaml"
yarn_nodemanager_opts: "-Xms6144m -Xmx6144m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:8141:/etc/prometheus/yarn_nodemanager_jmx_exporter.yaml"
hadoop_namenode_opts: "-Xms12288m -Xmx12288m -XX:+UseG1GC -XX:MaxGCPauseMillis=1000 -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10080:/etc/prometheus/hdfs_namenode_jmx_exporter.yaml"
yarn_resourcemanager_opts: "-Xms4096m -Xmx4096m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10083:/etc/prometheus/yarn_resourcemanager_jmx_exporter.yaml"
mapreduce_history_java_opts: "-Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10086:/etc/prometheus/mapreduce_history_jmx_exporter.yaml"
core_site_extra_properties:
# User used in the Yarn UI to check job logs/statuses/etc..
hadoop.http.staticuser.user: 'yarn'
yarn_site_extra_properties:
yarn.resourcemanager.scheduler.class: 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler'
yarn.resourcemanager.scheduler.monitor.enable: true
# Note: the extra space at the beginning is needed.
# If you enable ACLs the admin list is "everybody" by default, so the rules are not
# really respected if we don't set a more specific group.
# The 'hadoop.http.staticuser.user' needs to be among the admins to allow various UIs
# to see Yarn logs.
      yarn.admin.acl: ' yarn analytics-admins'
yarn.acl.enable: true
hdfs_site_extra_properties:
# Avoid long waits if the Namenodes are not reachable
# from clients.
dfs.client.failover.max.attempts: 3
dfs.namenode.acls.enabled: true
# Since we have less space available on the test cluster (due to fewer workers),
      # it is better to use a smaller replication factor (3 is Hadoop's default).
dfs.replication: 2
yarn_use_multi_spark_shufflers: true
# Multiple spark shufflers are listed here with the keys as major.minor
# versions in quotes and the value represents the port number to be used.
#
# n.b. The first version listed here must use the default port of 7337.
# For some reason, yarn isn't using the config overlay for the first shuffler loaded.
# See https://phabricator.wikimedia.org/T344910#9262138
yarn_multi_spark_shuffler_versions:
'3.1': 7337
'3.3': 7338
'3.4': 7339
analytics-hadoop:
zookeeper_cluster_name: analytics-eqiad
resourcemanager_hosts:
- an-master1003.eqiad.wmnet
- an-master1004.eqiad.wmnet
namenode_hosts:
- an-master1003.eqiad.wmnet
- an-master1004.eqiad.wmnet
journalnode_hosts:
- an-worker1178.eqiad.wmnet # Row A2
- an-worker1151.eqiad.wmnet # Row C7
- an-worker1142.eqiad.wmnet # Row E1
- an-worker1219.eqiad.wmnet # ROW B7
- an-worker1213.eqiad.wmnet # Row A4
net_topology:
an-master1003.eqiad.wmnet: /eqiad/C/2
an-master1004.eqiad.wmnet: /eqiad/D/7
an-worker1178.eqiad.wmnet: /eqiad/A/2
an-worker1210.eqiad.wmnet: /eqiad/A/2
an-worker1211.eqiad.wmnet: /eqiad/A/2
an-worker1233.eqiad.wmnet: /eqiad/A/2
an-worker1212.eqiad.wmnet: /eqiad/A/4
an-worker1213.eqiad.wmnet: /eqiad/A/4
an-worker1149.eqiad.wmnet: /eqiad/A/7
an-worker1214.eqiad.wmnet: /eqiad/A/7
an-worker1215.eqiad.wmnet: /eqiad/A/7
an-worker1216.eqiad.wmnet: /eqiad/B/2
an-worker1217.eqiad.wmnet: /eqiad/B/2
an-worker1150.eqiad.wmnet: /eqiad/B/7
an-worker1179.eqiad.wmnet: /eqiad/B/7
an-worker1218.eqiad.wmnet: /eqiad/B/7
an-worker1219.eqiad.wmnet: /eqiad/B/7
an-worker1220.eqiad.wmnet: /eqiad/C/2
an-worker1221.eqiad.wmnet: /eqiad/C/2
an-worker1234.eqiad.wmnet: /eqiad/C/2
an-worker1222.eqiad.wmnet: /eqiad/C/4
an-worker1223.eqiad.wmnet: /eqiad/C/4
an-worker1224.eqiad.wmnet: /eqiad/C/7
an-worker1225.eqiad.wmnet: /eqiad/C/7
an-worker1226.eqiad.wmnet: /eqiad/C/7
an-worker1151.eqiad.wmnet: /eqiad/C/7
an-worker1175.eqiad.wmnet: /eqiad/D/2
an-worker1209.eqiad.wmnet: /eqiad/D/2
an-worker1227.eqiad.wmnet: /eqiad/D/2
an-worker1228.eqiad.wmnet: /eqiad/D/2
an-worker1229.eqiad.wmnet: /eqiad/D/4
an-worker1230.eqiad.wmnet: /eqiad/D/4
an-worker1152.eqiad.wmnet: /eqiad/D/7
an-worker1231.eqiad.wmnet: /eqiad/D/7
an-worker1232.eqiad.wmnet: /eqiad/D/7
an-worker1142.eqiad.wmnet: /eqiad/E/1
an-worker1147.eqiad.wmnet: /eqiad/E/1
an-worker1153.eqiad.wmnet: /eqiad/E/1
an-worker1181.eqiad.wmnet: /eqiad/E/1
an-worker1187.eqiad.wmnet: /eqiad/E/1
an-worker1235.eqiad.wmnet: /eqiad/E/1
an-worker1143.eqiad.wmnet: /eqiad/E/2
an-worker1182.eqiad.wmnet: /eqiad/E/2
an-worker1188.eqiad.wmnet: /eqiad/E/2
an-worker1189.eqiad.wmnet: /eqiad/E/2
an-worker1154.eqiad.wmnet: /eqiad/E/3
an-worker1176.eqiad.wmnet: /eqiad/E/3
an-worker1190.eqiad.wmnet: /eqiad/E/3
an-worker1191.eqiad.wmnet: /eqiad/E/3
an-worker1157.eqiad.wmnet: /eqiad/E/5
an-worker1158.eqiad.wmnet: /eqiad/E/5
an-worker1159.eqiad.wmnet: /eqiad/E/5
an-worker1180.eqiad.wmnet: /eqiad/E/5
an-worker1183.eqiad.wmnet: /eqiad/E/5
an-worker1192.eqiad.wmnet: /eqiad/E/5
an-worker1193.eqiad.wmnet: /eqiad/E/5
an-worker1160.eqiad.wmnet: /eqiad/E/6
an-worker1161.eqiad.wmnet: /eqiad/E/6
an-worker1162.eqiad.wmnet: /eqiad/E/6
an-worker1184.eqiad.wmnet: /eqiad/E/6
an-worker1194.eqiad.wmnet: /eqiad/E/6
an-worker1195.eqiad.wmnet: /eqiad/E/6
an-worker1163.eqiad.wmnet: /eqiad/E/7
an-worker1164.eqiad.wmnet: /eqiad/E/7
an-worker1165.eqiad.wmnet: /eqiad/E/7
an-worker1196.eqiad.wmnet: /eqiad/E/7
an-worker1197.eqiad.wmnet: /eqiad/E/7
an-worker1144.eqiad.wmnet: /eqiad/F/1
an-worker1155.eqiad.wmnet: /eqiad/F/1
an-worker1198.eqiad.wmnet: /eqiad/F/1
an-worker1236.eqiad.wmnet: /eqiad/F/1
an-worker1145.eqiad.wmnet: /eqiad/F/2
an-worker1199.eqiad.wmnet: /eqiad/F/2
an-worker1200.eqiad.wmnet: /eqiad/F/2
an-worker1146.eqiad.wmnet: /eqiad/F/3
an-worker1156.eqiad.wmnet: /eqiad/F/3
an-worker1177.eqiad.wmnet: /eqiad/F/3
an-worker1201.eqiad.wmnet: /eqiad/F/3
an-worker1202.eqiad.wmnet: /eqiad/F/3
an-worker1166.eqiad.wmnet: /eqiad/F/5
an-worker1167.eqiad.wmnet: /eqiad/F/5
an-worker1168.eqiad.wmnet: /eqiad/F/5
an-worker1203.eqiad.wmnet: /eqiad/F/5
an-worker1204.eqiad.wmnet: /eqiad/F/5
an-worker1170.eqiad.wmnet: /eqiad/F/6
an-worker1171.eqiad.wmnet: /eqiad/F/6
an-worker1186.eqiad.wmnet: /eqiad/F/6
an-worker1205.eqiad.wmnet: /eqiad/F/6
an-worker1206.eqiad.wmnet: /eqiad/F/6
an-worker1172.eqiad.wmnet: /eqiad/F/7
an-worker1173.eqiad.wmnet: /eqiad/F/7
an-worker1174.eqiad.wmnet: /eqiad/F/7
an-worker1207.eqiad.wmnet: /eqiad/F/7
an-worker1208.eqiad.wmnet: /eqiad/F/7
an-worker1169.eqiad.wmnet: /eqiad/F/8
an-worker1185.eqiad.wmnet: /eqiad/F/8
# The following hosts are excluded from HDFS and YARN. The values are also used to determine
# whether or not to start the hadoop-yarn-nodemanager service.
excluded_hosts:
- an-worker1148.eqiad.wmnet # Hardware issue (T411919)
core_site_extra_properties:
# User used in the Yarn UI to check job logs/statuses/etc..
hadoop.http.staticuser.user: 'yarn'
# Requires the Capacity scheduler to work
yarn_node_labels_enabled: true
yarn_site_extra_properties:
yarn.resourcemanager.scheduler.class: 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler'
yarn.resourcemanager.scheduler.monitor.enable: true
# Note: the extra space at the beginning is needed.
# If you enable ACLs the admin list is "everybody" by default, so the rules are not
# really respected if we don't set a more specific group.
# The 'hadoop.http.staticuser.user' needs to be among the admins to allow various UIs
# to see Yarn logs.
      yarn.admin.acl: ' yarn analytics-admins'
yarn.acl.enable: true
hdfs_site_extra_properties:
# Quadruple the maximum number of items that a directory may contain. The default value is 1048576.
# If configured, this must be between 1 and 6400000. See #T380674 for more details.
dfs.namenode.fs-limits.max-directory-items: 4194304
# The datanode daemon by default begins the shutdown procedure as soon as
    # one volume/disk failure is registered. In our use case we want to keep the
    # datanode working in case of one/two (two is very unlikely on the same host)
# disk failures.
datanode_volumes_failed_tolerated: 2
# https://community.hortonworks.com/articles/43839/scaling-the-hdfs-namenode-part-2.html
# 20 * log2(Cluster Size)
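    # e.g. with the ~90 workers listed in net_topology above,
    # 20 * log2(90) ≈ 20 * 6.5 ≈ 130, so 127 is in the expected range.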
dfs_namenode_handler_count: 127
# We have experienced some issues with hdfs saveNamespace with a number of handler
# threads lower than the number of total datanodes running in the cluster.
# More specifically, during saveNamespace it seemed as if all service handler
# threads got blocked on a read lock, apparently held by the thread saving the fsimage
# file. The idea is to keep the number of threads a little more than the number
# of datanodes, to allow room for other threads responding to the ZKFC health probes.
# More info: T283733
dfs_namenode_service_port: 8040
dfs_namenode_service_handler_count: 100
# Allow a job to request up to the smallest value of yarn_nodemanager_resource_memory_mb
    # in the cluster (131072 MB = 128 GiB). Yarn workers all have at least 188 GB
    # of available memory, and for some use cases huge containers can be helpful.
yarn_scheduler_maximum_allocation_mb: 131072
yarn_resourcemanager_zk_state_store_parent_path: '/yarn-rmstore/analytics-hadoop'
# yarn_nodemanager_resource_memory_mb is set using the formula:
# total-memory - yarn_nodemanager_os_reserved_memory_mb
    # The reserved memory covers the co-located daemons: datanode (8G) +
    # nodemanager (12G) + journalnode (4G, on journalnode hosts) = 24G.
yarn_nodemanager_os_reserved_memory_mb: 24000
hadoop_datanode_opts: "-Xms8192m -Xmx8192m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:51010:/etc/prometheus/hdfs_datanode_jmx_exporter.yaml"
hadoop_journalnode_opts: "-Xms4096m -Xmx4096m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10485:/etc/prometheus/hdfs_journalnode_jmx_exporter.yaml"
yarn_nodemanager_opts: "-Xms12288m -Xmx12288m -Djava.net.preferIPv4Stack=false -XX:+UseG1GC -XX:+UseStringDeduplication -XX:MaxGCPauseMillis=1000 -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:8141:/etc/prometheus/yarn_nodemanager_jmx_exporter.yaml"
# Following https://docs.cloudera.com/HDPDocuments/HDP2/HDP-2.6.3/bk_command-line-installation/content/configuring-namenode-heap-size.html
# and https://docs.cloudera.com/HDPDocuments/HDP2/HDP-2.6.2/bk_hdfs-administration/content/ch_g1gc_garbage_collector_tech_preview.html
# Also see T310293 and T380674 for reasons behind increases to the heap size
hadoop_namenode_opts: "-Xms196608m -Xmx196608m -XX:PermSize=128m -XX:MaxPermSize=256m -XX:NewSize=28672m -XX:MaxNewSize=28672m -Djava.net.preferIPv4Stack=false -XX:+UseG1GC -XX:MaxGCPauseMillis=4000 -XX:ParallelGCThreads=20 -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10080:/etc/prometheus/hdfs_namenode_jmx_exporter.yaml"
yarn_resourcemanager_opts: "-Xms4096m -Xmx4096m -Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10083:/etc/prometheus/yarn_resourcemanager_jmx_exporter.yaml"
mapreduce_history_java_opts: "-Djava.net.preferIPv4Stack=false -javaagent:/usr/share/java/prometheus/jmx_prometheus_javaagent.jar=[::]:10086:/etc/prometheus/mapreduce_history_jmx_exporter.yaml"
mapreduce_history_heap_size: 4096
yarn_use_multi_spark_shufflers: true
# Multiple spark shufflers are listed here with the keys as major.minor
# versions in quotes and the value represents the port number to be used.
#
# n.b. The first version listed here must use the default port of 7337.
# For some reason, yarn isn't using the config overlay for the first shuffler loaded.
# See https://phabricator.wikimedia.org/T344910#9262138
yarn_multi_spark_shuffler_versions:
'3.1': 7337
'3.3': 7338
'3.4': 7339
    # The internal service endpoint to which the Spark history requests
    # will be redirected.
yarn_spark_history_server_address: "https://spark-history.svc.eqiad.wmnet:30443"
deployment_server: deploy1003.eqiad.wmnet
netmon_server: netmon1003.wikimedia.org
netmon_servers_failover:
- 'netmon2002.wikimedia.org'
releases_server: releases2003.codfw.wmnet
releases_servers_failover:
- 'releases1003.eqiad.wmnet'
# Defines which Phabricator server is the active one,
# used to open the needed firewall holes and to decide
# where dumps are created.
# Note there is an additional list of all Phabricator
# servers in the role specific Hiera.
phabricator_active_server: phab1004.eqiad.wmnet
phabricator_passive_server: phab2002.codfw.wmnet
# Jenkins controllers are agents to themselves and to each other. They thus
# need to allow ssh access from the other controllers.
#
# We also need WMCS instances to allow inbound ssh connections since they might
# have ferm rules.
jenkins_controller_hosts:
- contint1002.wikimedia.org
- contint2002.wikimedia.org
kerberos_realm_name: WIKIMEDIA
# Bind the following set of domains to the kerberos realm,
# thus allowing services hosted at subdomains of these domains to
# rely on Kerberos authentication
kerberos_extra_realm_domains:
- 'discovery.wmnet'
kerberos_kadmin_server_primary: 'krb1002.eqiad.wmnet'
kerberos_kadmin_keytabs_repo:
- 'puppetserver1001.eqiad.wmnet'
# This list of servers is used between Kerberos servers. It is typically kept in sync
# with kerberos_kdc_servers_to_clients, but there are circumstances where they might differ,
# e.g. while provisioning a new server
kerberos_kdc_servers:
- 'krb1002.eqiad.wmnet'
- 'krb2002.codfw.wmnet'
# This list of servers is passed to Kerberos clients. It is typically kept in sync
# with kerberos_kdc_servers, but there are circumstances where they might differ,
# e.g. while provisioning a new server
kerberos_kdc_servers_to_clients:
- 'krb1002.eqiad.wmnet'
- 'krb2002.codfw.wmnet'
# Etcd client global configuration
etcd_client_srv_domain: "conftool.%{::site}.wmnet"
etcd_host: ~
etcd_port: ~
# Conftool global prefix (will be per-dc)
conftool_prefix: "/conftool/v1"
# Logging: logstash, udp2log
logstash_host: "logstash.svc.eqiad.wmnet"
logstash_syslog_port: 10514
logstash_gelf_port: 12201
# TCP json_lines input
logstash_json_lines_port: 11514
# UDP logback/json input
logstash_logback_port: 11514
udp2log_aggregator: "udplog:8420"
tcpircbot_host: 'icinga.wikimedia.org'
tcpircbot_port: 9200
graphite_primary_host: graphite1005.eqiad.wmnet
lists_primary_host: lists1004.wikimedia.org
lists_standby_host: ['lists2001.wikimedia.org']
stewards_primary_host: stewards1001.eqiad.wmnet
stewards_standby_host: ~
# User for Jenkins controllers SSH connections to the agents
jenkins_agent_username: 'jenkins-agent'
# HTTP proxy.
# Provide these as separate host and port settings.
http_proxy_host: webproxy.%{::site}.wmnet
http_proxy_port: 8080
# And as a URL (can be used as an env variable).
http_proxy: "http://%{lookup('http_proxy_host')}:%{lookup('http_proxy_port')}"
# This is the "live" authdns server set, which feeds into any other tooling
# that needs to operate on them (including themselves). It includes redundant
# IP address information so that authdns tooling can operate in the face of
# recdns issues.
authdns_servers:
'dns1004.wikimedia.org': 208.80.154.6
'dns1005.wikimedia.org': 208.80.154.153
'dns1006.wikimedia.org': 208.80.154.77
'dns2004.wikimedia.org': 208.80.153.48
'dns2005.wikimedia.org': 208.80.153.74
'dns2006.wikimedia.org': 208.80.153.107
'dns3003.wikimedia.org': 185.15.59.34
'dns3004.wikimedia.org': 185.15.59.2
'dns4003.wikimedia.org': 198.35.26.7
'dns4004.wikimedia.org': 198.35.26.8
'dns5003.wikimedia.org': 103.102.166.10
'dns5004.wikimedia.org': 103.102.166.8
'dns6001.wikimedia.org': 185.15.58.5
'dns6002.wikimedia.org': 185.15.58.37
'dns7001.wikimedia.org': 195.200.68.4
'dns7002.wikimedia.org': 195.200.68.37
authdns_addrs:
ns0-v4:
address: '208.80.154.238'
skip_loopback_site: eqiad
ns0-v6:
address: '2620:0:861:53::1'
skip_loopback_site: eqiad
ns1-v4:
address: '208.80.153.231'
skip_loopback_site: codfw
ns1-v6:
address: '2620:0:860:53::1'
skip_loopback_site: codfw
ns2-v4:
address: '198.35.27.27'
skip_loopback: true
ns2-v6:
address: '2a02:ec80:53::1'
skip_loopback: true
# acme-chief active host
acmechief_host: 'acmechief2002.codfw.wmnet'
# These are our servers - they all peer to each other and sync to upstream NTP
# pool servers.
ntp_peers:
eqiad:
- 'dns1004.wikimedia.org'
- 'dns1005.wikimedia.org'
- 'dns1006.wikimedia.org'
codfw:
- 'dns2004.wikimedia.org'
- 'dns2005.wikimedia.org'
- 'dns2006.wikimedia.org'
esams:
- 'dns3003.wikimedia.org'
- 'dns3004.wikimedia.org'
ulsfo:
- 'dns4003.wikimedia.org'
- 'dns4004.wikimedia.org'
eqsin:
- 'dns5003.wikimedia.org'
- 'dns5004.wikimedia.org'
drmrs:
- 'dns6001.wikimedia.org'
- 'dns6002.wikimedia.org'
magru:
- 'dns7001.wikimedia.org'
- 'dns7002.wikimedia.org'
# T366360: switch to NTP anycast servers. This is going to become the default,
# and the ntp_peers above will be deprecated.
ntp_anycast_peers:
- 'ntp-a.anycast.wmnet'
- 'ntp-b.anycast.wmnet'
- 'ntp-c.anycast.wmnet'
# Url to use for reaching graphite
graphite_host: 'graphite-in.eqiad.wmnet'
graphite_url: "http://%{lookup('graphite_host')}"
cumin_masters:
- 10.64.16.154 # cumin1003.eqiad.wmnet
- 2620:0:861:102:10:64:16:154 # cumin1003.eqiad.wmnet
- 10.192.32.49 # cumin2002.codfw.wmnet
- 2620:0:860:103:10:192:32:49 # cumin2002.codfw.wmnet
unpriv_cumin_masters:
- 10.64.48.57 # cuminunpriv1001.eqiad.wmnet
- 2620:0:861:107:10:64:48:57 # cuminunpriv1001.eqiad.wmnet
cloud_cumin_masters:
- 10.64.48.148 # cloudcumin1001.eqiad.wmnet
- 2620:0:861:107:10:64:48:148 # cloudcumin1001.eqiad.wmnet
- 10.192.32.140 # cloudcumin2001.codfw.wmnet
- 2620:0:860:103:10:192:32:140 # cloudcumin2001.codfw.wmnet
bastion_hosts:
- 208.80.154.7 # bast1004.wikimedia.org
- 2620:0:861:1:208:80:154:7 # bast1004.wikimedia.org
- 208.80.153.110 # bast2003.wikimedia.org
- 2a02:ec80:300:3:185:15:59:99 # bast3007.wikimedia.org
- 185.15.59.99 # bast3007.wikimedia.org
- 2620:0:860:4:208:80:153:110 # bast2003.wikimedia.org
- 198.35.26.104 # bast4006.wikimedia.org
- 2620:0:863:3:198:35:26:104 # bast4006.wikimedia.org
- 185.15.58.6 # bast6003.wikimedia.org
- 2a02:ec80:600:1:185:15:58:6 # bast6003.wikimedia.org
- 195.200.68.99 # bast7002.wikimedia.org
- 2a02:ec80:700:3:195:200:68:99 # bast7002.wikimedia.org
cache_hosts:
- 10.64.0.79 # cp1100.eqiad.wmnet
- 2620:0:861:101:10:64:0:79 # cp1100.eqiad.wmnet
- 10.64.0.229 # cp1101.eqiad.wmnet
- 2620:0:861:101:10:64:0:229 # cp1101.eqiad.wmnet
- 10.64.0.14 # cp1102.eqiad.wmnet
- 2620:0:861:101:10:64:0:14 # cp1102.eqiad.wmnet
- 10.64.0.51 # cp1103.eqiad.wmnet
- 2620:0:861:101:10:64:0:51 # cp1103.eqiad.wmnet
- 10.64.16.241 # cp1104.eqiad.wmnet
- 2620:0:861:102:10:64:16:241 # cp1104.eqiad.wmnet
- 10.64.16.94 # cp1105.eqiad.wmnet
- 2620:0:861:102:10:64:16:94 # cp1105.eqiad.wmnet
- 10.64.16.95 # cp1106.eqiad.wmnet
- 2620:0:861:102:10:64:16:95 # cp1106.eqiad.wmnet
- 10.64.16.240 # cp1107.eqiad.wmnet
- 2620:0:861:102:10:64:16:240 # cp1107.eqiad.wmnet
- 10.64.32.14 # cp1108.eqiad.wmnet
- 2620:0:861:103:10:64:32:14 # cp1108.eqiad.wmnet
- 10.64.32.60 # cp1109.eqiad.wmnet
- 2620:0:861:103:10:64:32:60 # cp1109.eqiad.wmnet
- 10.64.32.15 # cp1110.eqiad.wmnet
- 2620:0:861:103:10:64:32:15 # cp1110.eqiad.wmnet
- 10.64.32.65 # cp1111.eqiad.wmnet
- 2620:0:861:103:10:64:32:65 # cp1111.eqiad.wmnet
- 10.64.48.16 # cp1112.eqiad.wmnet
- 2620:0:861:107:10:64:48:16 # cp1112.eqiad.wmnet
- 10.64.48.41 # cp1113.eqiad.wmnet
- 2620:0:861:107:10:64:48:41 # cp1113.eqiad.wmnet
- 10.64.48.27 # cp1114.eqiad.wmnet
- 2620:0:861:107:10:64:48:27 # cp1114.eqiad.wmnet
- 10.64.48.28 # cp1115.eqiad.wmnet
- 2620:0:861:107:10:64:48:28 # cp1115.eqiad.wmnet
- 10.192.23.26 # cp2043.codfw.wmnet
- 2620:0:860:113:10:192:23:26 # cp2043.codfw.wmnet
- 10.192.6.20 # cp2044.codfw.wmnet
- 2620:0:860:107:10:192:6:20 # cp2044.codfw.wmnet
- 10.192.12.35 # cp2045.codfw.wmnet
- 2620:0:860:10d:10:192:12:35 # cp2045.codfw.wmnet
- 10.192.14.25 # cp2046.codfw.wmnet
- 2620:0:860:10f:10:192:14:25 # cp2046.codfw.wmnet
- 10.192.4.22 # cp2047.codfw.wmnet
- 2620:0:860:100:10:192:4:22 # cp2047.codfw.wmnet
- 10.192.29.26 # cp2048.codfw.wmnet
- 2620:0:860:116:10:192:29:26 # cp2048.codfw.wmnet
- 10.192.30.29 # cp2049.codfw.wmnet
- 2620:0:860:119:10:192:30:29 # cp2049.codfw.wmnet
- 10.192.36.19 # cp2050.codfw.wmnet
- 2620:0:860:11b:10:192:36:19 # cp2050.codfw.wmnet
- 10.192.40.25 # cp2051.codfw.wmnet
- 2620:0:860:11f:10:192:40:25 # cp2051.codfw.wmnet
- 10.192.41.21 # cp2052.codfw.wmnet
- 2620:0:860:120:10:192:41:21 # cp2052.codfw.wmnet
- 10.192.56.3 # cp2053.codfw.wmnet
- 2620:0:860:12b:10:192:56:3 # cp2053.codfw.wmnet
- 10.192.56.4 # cp2054.codfw.wmnet
- 2620:0:860:12b:10:192:56:4 # cp2054.codfw.wmnet
- 10.192.57.3 # cp2055.codfw.wmnet
- 2620:0:860:12c:10:192:57:3 # cp2055.codfw.wmnet
- 10.192.58.2 # cp2056.codfw.wmnet
- 2620:0:860:12d:10:192:58:2 # cp2056.codfw.wmnet
- 10.192.58.3 # cp2057.codfw.wmnet
- 2620:0:860:12d:10:192:58:3 # cp2057.codfw.wmnet
- 10.192.59.2 # cp2058.codfw.wmnet
- 2620:0:860:12e:10:192:59:2 # cp2058.codfw.wmnet
- 10.80.0.14 # cp3066.esams.wmnet
- 2a02:ec80:300:101:10:80:0:14 # cp3066.esams.wmnet
- 10.80.1.11 # cp3067.esams.wmnet
- 2a02:ec80:300:102:10:80:1:11 # cp3067.esams.wmnet
- 10.80.0.13 # cp3068.esams.wmnet
- 2a02:ec80:300:101:10:80:0:13 # cp3068.esams.wmnet
- 10.80.1.9 # cp3069.esams.wmnet
- 2a02:ec80:300:102:10:80:1:9 # cp3069.esams.wmnet
- 10.80.0.12 # cp3070.esams.wmnet
- 2a02:ec80:300:101:10:80:0:12 # cp3070.esams.wmnet
- 10.80.1.7 # cp3071.esams.wmnet
- 2a02:ec80:300:102:10:80:1:7 # cp3071.esams.wmnet
- 10.80.0.11 # cp3072.esams.wmnet
- 2a02:ec80:300:101:10:80:0:11 # cp3072.esams.wmnet
- 10.80.1.6 # cp3073.esams.wmnet
- 2a02:ec80:300:102:10:80:1:6 # cp3073.esams.wmnet
- 10.80.0.10 # cp3074.esams.wmnet
- 2a02:ec80:300:101:10:80:0:10 # cp3074.esams.wmnet
- 10.80.1.5 # cp3075.esams.wmnet
- 2a02:ec80:300:102:10:80:1:5 # cp3075.esams.wmnet
- 10.80.0.8 # cp3076.esams.wmnet
- 2a02:ec80:300:101:10:80:0:8 # cp3076.esams.wmnet
- 10.80.1.4 # cp3077.esams.wmnet
- 2a02:ec80:300:102:10:80:1:4 # cp3077.esams.wmnet
- 10.80.0.7 # cp3078.esams.wmnet
- 2a02:ec80:300:101:10:80:0:7 # cp3078.esams.wmnet
- 10.80.1.3 # cp3079.esams.wmnet
- 2a02:ec80:300:102:10:80:1:3 # cp3079.esams.wmnet
- 10.80.0.6 # cp3080.esams.wmnet
- 2a02:ec80:300:101:10:80:0:6 # cp3080.esams.wmnet
- 10.80.1.2 # cp3081.esams.wmnet
- 2a02:ec80:300:102:10:80:1:2 # cp3081.esams.wmnet
- 10.128.0.19 # cp4037.ulsfo.wmnet
- 2620:0:863:101:10:128:0:19 # cp4037.ulsfo.wmnet
- 10.128.0.27 # cp4038.ulsfo.wmnet
- 2620:0:863:101:10:128:0:27 # cp4038.ulsfo.wmnet
- 10.128.0.22 # cp4039.ulsfo.wmnet
- 2620:0:863:101:10:128:0:22 # cp4039.ulsfo.wmnet
- 10.128.0.28 # cp4040.ulsfo.wmnet
- 2620:0:863:101:10:128:0:28 # cp4040.ulsfo.wmnet
- 10.128.0.25 # cp4041.ulsfo.wmnet
- 2620:0:863:101:10:128:0:25 # cp4041.ulsfo.wmnet
- 10.128.0.29 # cp4042.ulsfo.wmnet
- 2620:0:863:101:10:128:0:29 # cp4042.ulsfo.wmnet
- 10.128.0.26 # cp4043.ulsfo.wmnet
- 2620:0:863:101:10:128:0:26 # cp4043.ulsfo.wmnet
- 10.128.0.31 # cp4044.ulsfo.wmnet
- 2620:0:863:101:10:128:0:31 # cp4044.ulsfo.wmnet
- 10.128.0.14 # cp4045.ulsfo.wmnet
- 2620:0:863:101:10:128:0:14 # cp4045.ulsfo.wmnet
- 10.128.0.35 # cp4046.ulsfo.wmnet
- 2620:0:863:101:10:128:0:35 # cp4046.ulsfo.wmnet
- 10.128.0.21 # cp4047.ulsfo.wmnet
- 2620:0:863:101:10:128:0:21 # cp4047.ulsfo.wmnet
- 10.128.0.36 # cp4048.ulsfo.wmnet
- 2620:0:863:101:10:128:0:36 # cp4048.ulsfo.wmnet
- 10.128.0.24 # cp4049.ulsfo.wmnet
- 2620:0:863:101:10:128:0:24 # cp4049.ulsfo.wmnet
- 10.128.0.10 # cp4050.ulsfo.wmnet
- 2620:0:863:101:10:128:0:10 # cp4050.ulsfo.wmnet
- 10.128.0.37 # cp4051.ulsfo.wmnet
- 2620:0:863:101:10:128:0:37 # cp4051.ulsfo.wmnet
- 10.128.0.12 # cp4052.ulsfo.wmnet
- 2620:0:863:101:10:128:0:12 # cp4052.ulsfo.wmnet
- 10.132.0.17 # cp5017.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:17 # cp5017.eqsin.wmnet
- 10.132.0.18 # cp5018.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:18 # cp5018.eqsin.wmnet
- 10.132.0.19 # cp5019.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:19 # cp5019.eqsin.wmnet
- 10.132.0.24 # cp5020.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:24 # cp5020.eqsin.wmnet
- 10.132.0.29 # cp5021.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:29 # cp5021.eqsin.wmnet
- 10.132.0.30 # cp5022.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:30 # cp5022.eqsin.wmnet
- 10.132.0.34 # cp5023.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:34 # cp5023.eqsin.wmnet
- 10.132.0.35 # cp5024.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:35 # cp5024.eqsin.wmnet
- 10.132.0.36 # cp5025.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:36 # cp5025.eqsin.wmnet
- 10.132.0.37 # cp5026.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:37 # cp5026.eqsin.wmnet
- 10.132.0.38 # cp5027.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:38 # cp5027.eqsin.wmnet
- 10.132.0.25 # cp5028.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:25 # cp5028.eqsin.wmnet
- 10.132.0.26 # cp5029.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:26 # cp5029.eqsin.wmnet
- 10.132.0.27 # cp5030.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:27 # cp5030.eqsin.wmnet
- 10.132.0.28 # cp5031.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:28 # cp5031.eqsin.wmnet
- 10.132.0.16 # cp5032.eqsin.wmnet
- 2001:df2:e500:101:10:132:0:16 # cp5032.eqsin.wmnet
- 10.136.0.6 # cp6001.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:6 # cp6001.drmrs.wmnet
- 10.136.1.6 # cp6002.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:6 # cp6002.drmrs.wmnet
- 10.136.0.7 # cp6003.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:7 # cp6003.drmrs.wmnet
- 10.136.1.7 # cp6004.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:7 # cp6004.drmrs.wmnet
- 10.136.0.8 # cp6005.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:8 # cp6005.drmrs.wmnet
- 10.136.1.8 # cp6006.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:8 # cp6006.drmrs.wmnet
- 10.136.0.9 # cp6007.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:9 # cp6007.drmrs.wmnet
- 10.136.1.9 # cp6008.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:9 # cp6008.drmrs.wmnet
- 10.136.0.10 # cp6009.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:10 # cp6009.drmrs.wmnet
- 10.136.1.10 # cp6010.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:10 # cp6010.drmrs.wmnet
- 10.136.0.11 # cp6011.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:11 # cp6011.drmrs.wmnet
- 10.136.1.11 # cp6012.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:11 # cp6012.drmrs.wmnet
- 10.136.0.12 # cp6013.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:12 # cp6013.drmrs.wmnet
- 10.136.1.12 # cp6014.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:12 # cp6014.drmrs.wmnet
- 10.136.0.13 # cp6015.drmrs.wmnet
- 2a02:ec80:600:101:10:136:0:13 # cp6015.drmrs.wmnet
- 10.136.1.13 # cp6016.drmrs.wmnet
- 2a02:ec80:600:102:10:136:1:13 # cp6016.drmrs.wmnet
- 10.140.0.3 # cp7001.magru.wmnet
- 2a02:ec80:700:101:10:140:0:3 # cp7001.magru.wmnet
- 10.140.1.4 # cp7002.magru.wmnet
- 2a02:ec80:700:102:10:140:1:4 # cp7002.magru.wmnet
- 10.140.0.4 # cp7003.magru.wmnet
- 2a02:ec80:700:101:10:140:0:4 # cp7003.magru.wmnet
- 10.140.1.5 # cp7004.magru.wmnet
- 2a02:ec80:700:102:10:140:1:5 # cp7004.magru.wmnet
- 10.140.0.5 # cp7005.magru.wmnet
- 2a02:ec80:700:101:10:140:0:5 # cp7005.magru.wmnet
- 10.140.1.6 # cp7006.magru.wmnet
- 2a02:ec80:700:102:10:140:1:6 # cp7006.magru.wmnet
- 10.140.0.6 # cp7007.magru.wmnet
- 2a02:ec80:700:101:10:140:0:6 # cp7007.magru.wmnet
- 10.140.1.7 # cp7008.magru.wmnet
- 2a02:ec80:700:102:10:140:1:7 # cp7008.magru.wmnet
- 10.140.0.7 # cp7009.magru.wmnet
- 2a02:ec80:700:101:10:140:0:7 # cp7009.magru.wmnet
- 10.140.1.8 # cp7010.magru.wmnet
- 2a02:ec80:700:102:10:140:1:8 # cp7010.magru.wmnet
- 10.140.0.8 # cp7011.magru.wmnet
- 2a02:ec80:700:101:10:140:0:8 # cp7011.magru.wmnet
- 10.140.1.9 # cp7012.magru.wmnet
- 2a02:ec80:700:102:10:140:1:9 # cp7012.magru.wmnet
- 10.140.0.9 # cp7013.magru.wmnet
- 2a02:ec80:700:101:10:140:0:9 # cp7013.magru.wmnet
- 10.140.1.10 # cp7014.magru.wmnet
- 2a02:ec80:700:102:10:140:1:10 # cp7014.magru.wmnet
- 10.140.0.10 # cp7015.magru.wmnet
- 2a02:ec80:700:101:10:140:0:10 # cp7015.magru.wmnet
- 10.140.1.11 # cp7016.magru.wmnet
- 2a02:ec80:700:102:10:140:1:11 # cp7016.magru.wmnet
kafka_brokers_main:
- 10.192.5.9 # kafka-main2006.codfw.wmnet #T363210
- 2620:0:860:106:10:192:5:9 # kafka-main2006.codfw.wmnet #T363210
- 10.192.22.6 # kafka-main2007.codfw.wmnet #T363210
- 2620:0:860:112:10:192:22:6 # kafka-main2007.codfw.wmnet #T363210
- 10.192.32.4 # kafka-main2008.codfw.wmnet #T363210
- 2620:0:860:103:10:192:32:4 # kafka-main2008.codfw.wmnet #T363210
- 10.192.48.33 # kafka-main2009.codfw.wmnet #T363210
- 2620:0:860:104:10:192:48:33 # kafka-main2009.codfw.wmnet #T363210
- 10.192.48.35 # kafka-main2010.codfw.wmnet #T363210
- 2620:0:860:104:10:192:48:35 # kafka-main2010.codfw.wmnet #T363210
- 10.64.0.101 # kafka-main1006.eqiad.wmnet #T363214
- 2620:0:861:101:10:64:0:101 # kafka-main1006.eqiad.wmnet #T363214
- 10.64.16.30 # kafka-main1007.eqiad.wmnet #T363214
- 2620:0:861:102:10:64:16:30 # kafka-main1007.eqiad.wmnet #T363214
- 10.64.32.45 # kafka-main1008.eqiad.wmnet #T363214
- 2620:0:861:103:10:64:32:45 # kafka-main1008.eqiad.wmnet #T363214
- 10.64.48.37 # kafka-main1009.eqiad.wmnet #T363214
- 2620:0:861:107:10:64:48:37 # kafka-main1009.eqiad.wmnet #T363214
- 10.64.152.5 # kafka-main1010.eqiad.wmnet #T363214
- 2620:0:861:120:10:64:152:5 # kafka-main1010.eqiad.wmnet #T363214
kafka_brokers_jumbo:
- 10.64.130.10 # kafka-jumbo1010.eqiad.wmnet
- 2620:0:861:109:10:64:130:10 # kafka-jumbo1010.eqiad.wmnet
- 10.64.131.16 # kafka-jumbo1011.eqiad.wmnet
- 2620:0:861:10a:10:64:131:16 # kafka-jumbo1011.eqiad.wmnet
- 10.64.132.21 # kafka-jumbo1012.eqiad.wmnet
- 2620:0:861:10b:10:64:132:21 # kafka-jumbo1012.eqiad.wmnet
- 10.64.134.9 # kafka-jumbo1013.eqiad.wmnet
- 2620:0:861:10d:10:64:134:9 # kafka-jumbo1013.eqiad.wmnet
- 10.64.135.16 # kafka-jumbo1014.eqiad.wmnet
- 2620:0:861:10e:10:64:135:16 # kafka-jumbo1014.eqiad.wmnet
- 10.64.136.11 # kafka-jumbo1015.eqiad.wmnet
- 2620:0:861:10f:10:64:136:11 # kafka-jumbo1015.eqiad.wmnet
- 10.64.154.15 # kafka-jumbo1016.eqiad.wmnet
- 2620:0:861:122:10:64:154:15 # kafka-jumbo1016.eqiad.wmnet
- 10.64.160.16 # kafka-jumbo1017.eqiad.wmnet
- 2620:0:861:128:10:64:160:16 # kafka-jumbo1017.eqiad.wmnet
- 10.64.0.126 # kafka-jumbo1018.eqiad.wmnet
- 2620:0:861:101:10:64:0:126 # kafka-jumbo1018.eqiad.wmnet
kafka_brokers_logging:
- 10.64.16.205 # kafka-logging1001.eqiad.wmnet
- 2620:0:861:102:10:64:16:205 # kafka-logging1001.eqiad.wmnet
- 10.64.32.142 # kafka-logging1002.eqiad.wmnet
- 2620:0:861:103:10:64:32:142 # kafka-logging1002.eqiad.wmnet
- 10.64.48.66 # kafka-logging1003.eqiad.wmnet
- 2620:0:861:107:10:64:48:66 # kafka-logging1003.eqiad.wmnet
- 10.64.131.13 # kafka-logging1004.eqiad.wmnet
- 2620:0:861:10a:10:64:131:13 # kafka-logging1004.eqiad.wmnet
- 10.64.135.13 # kafka-logging1005.eqiad.wmnet
- 2620:0:861:10e:10:64:135:13 # kafka-logging1005.eqiad.wmnet
- 10.192.0.94 # kafka-logging2001.codfw.wmnet
- 2620:0:860:101:10:192:0:94 # kafka-logging2001.codfw.wmnet
- 10.192.16.50 # kafka-logging2002.codfw.wmnet
- 2620:0:860:102:10:192:16:50 # kafka-logging2002.codfw.wmnet
- 10.192.32.24 # kafka-logging2003.codfw.wmnet
- 2620:0:860:103:10:192:32:24 # kafka-logging2003.codfw.wmnet
- 10.192.16.38 # kafka-logging2004.codfw.wmnet
- 2620:0:860:102:10:192:16:38 # kafka-logging2004.codfw.wmnet
- 10.192.48.85 # kafka-logging2005.codfw.wmnet
- 2620:0:860:104:10:192:48:85 # kafka-logging2005.codfw.wmnet
kafkamon_hosts:
- 10.64.32.11 # kafkamon1003.eqiad.wmnet
- 2620:0:861:103:10:64:32:11 # kafkamon1003.eqiad.wmnet
- 10.192.16.139 # kafkamon2003.codfw.wmnet
- 2620:0:860:102:10:192:16:139 # kafkamon2003.codfw.wmnet
zookeeper_hosts_main:
- 10.64.0.207 # conf1007.eqiad.wmnet
- 2620:0:861:101:10:64:0:207 # conf1007.eqiad.wmnet
- 10.64.16.110 # conf1008.eqiad.wmnet
- 2620:0:861:102:10:64:16:110 # conf1008.eqiad.wmnet
- 10.64.48.154 # conf1009.eqiad.wmnet
- 2620:0:861:107:10:64:48:154 # conf1009.eqiad.wmnet
- 10.192.16.45 # conf2004.codfw.wmnet
- 2620:0:860:102:10:192:16:45 # conf2004.codfw.wmnet
- 10.192.32.52 # conf2005.codfw.wmnet
- 2620:0:860:103:10:192:32:52 # conf2005.codfw.wmnet
- 10.192.48.59 # conf2006.codfw.wmnet
- 2620:0:860:104:10:192:48:59 # conf2006.codfw.wmnet
zookeeper_flink_hosts:
- 10.64.16.9 # flink-zk1001.eqiad.wmnet
- 2620:0:861:102:10:64:16:9 # flink-zk1001.eqiad.wmnet
- 10.64.0.8 # flink-zk1002.eqiad.wmnet
- 2620:0:861:101:10:64:0:8 # flink-zk1002.eqiad.wmnet
- 10.64.32.41 # flink-zk1003.eqiad.wmnet
- 2620:0:861:103:10:64:32:41 # flink-zk1003.eqiad.wmnet
- 10.192.16.227 # flink-zk2001.codfw.wmnet
- 2620:0:860:102:10:192:16:227 # flink-zk2001.codfw.wmnet
- 10.192.32.179 # flink-zk2002.codfw.wmnet
- 2620:0:860:103:10:192:32:179 # flink-zk2002.codfw.wmnet
- 10.192.48.219 # flink-zk2003.codfw.wmnet
- 2620:0:860:104:10:192:48:219 # flink-zk2003.codfw.wmnet
druid_public_hosts:
- 10.64.131.9 # druid1009.eqiad.wmnet
- 2620:0:861:10a:10:64:131:9 # druid1009.eqiad.wmnet
- 10.64.132.12 # druid1010.eqiad.wmnet
- 2620:0:861:10b:10:64:132:12 # druid1010.eqiad.wmnet
- 10.64.135.9 # druid1011.eqiad.wmnet
- 2620:0:861:10e:10:64:135:9 # druid1011.eqiad.wmnet
- 10.64.32.101 # druid1012.eqiad.wmnet
- 2620:0:861:103:10:64:32:101 # druid1012.eqiad.wmnet
- 10.64.48.185 # druid1013.eqiad.wmnet
- 2620:0:861:107:10:64:48:185 # druid1013.eqiad.wmnet
labstore_hosts:
- 208.80.154.142 # clouddumps1001.wikimedia.org
- 2620:0:861:2:208:80:154:142 # clouddumps1001.wikimedia.org
- 208.80.154.71 # clouddumps1002.wikimedia.org
- 2620:0:861:3:208:80:154:71 # clouddumps1002.wikimedia.org
mysql_root_clients:
# IPv6 addresses are not yet allowed here due to MySQL grants.
# Do not put DNS names or hostnames here, only IPv4 addresses.
- 10.64.16.90 # db1215.eqiad.wmnet
- 10.192.16.191 # db2185.codfw.wmnet
- 10.64.16.154 # cumin1003.eqiad.wmnet
- 10.192.32.49 # cumin2002.codfw.wmnet
- 208.80.155.103 # dborch1001.wikimedia.org
- 208.80.154.9 # dborch1002.wikimedia.org
- 10.64.0.20 # dborch1003.eqiad.wmnet
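# A hedged illustration of why only IPv4 literals work here: MySQL grants are
# keyed by client host, so each entry above presumably ends up in a grant of
# roughly this shape (host and privileges are illustrative, not the actual
# production grant):
#   GRANT ALL PRIVILEGES ON *.* TO 'root'@'10.64.16.90';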
monitoring_hosts:
- 208.80.154.78 # alert1002.wikimedia.org
- 2620:0:861:3:208:80:154:78 # alert1002.wikimedia.org
- 208.80.153.42 # alert2002.wikimedia.org
- 2620:0:860:2:208:80:153:42 # alert2002.wikimedia.org
deployment_hosts:
- 10.64.16.93 # deploy1003.eqiad.wmnet
- 2620:0:861:102:10:64:16:93 # deploy1003.eqiad.wmnet
- 10.192.32.7 # deploy2002.codfw.wmnet
- 2620:0:860:103:10:192:32:7 # deploy2002.codfw.wmnet
labsldapconfig: {}
ldap:
base-dn: 'dc=wikimedia,dc=org'
groups_cn: 'ou=groups'
users_cn: 'ou=people'
proxyagent: 'cn=proxyagent,ou=profile,dc=wikimedia,dc=org'
proxypass: "%{lookup('labsldapconfig.proxypass')}"
script_user_dn: 'cn=scriptuser,ou=profile,dc=wikimedia,dc=org'
script_user_pass: "%{lookup('labsldapconfig.script_user_pass')}"
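# Note: labsldapconfig is declared as an empty hash ({}) above, so the
# %{lookup('labsldapconfig.*')} interpolations here are expected to resolve
# against a higher-priority hierarchy level (e.g. the private repo) that
# supplies the real secrets; that is an assumption about where the values
# come from, not something encoded in this file.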
netbox_api_url: https://netbox.discovery.wmnet/
apereo_cas:
production:
base_url: 'https://idp.wikimedia.org/'
login_url: 'https://idp.wikimedia.org/login'
validate_url: 'https://idp.wikimedia.org/serviceValidate'
validate_url_saml: 'https://idp.wikimedia.org/samlValidate'
oidc_endpoint: 'https://idp.wikimedia.org/oidc'
staging:
base_url: 'https://idp-test.wikimedia.org/'
login_url: 'https://idp-test.wikimedia.org/login'
validate_url: 'https://idp-test.wikimedia.org/serviceValidate'
validate_url_saml: 'https://idp-test.wikimedia.org/samlValidate'
oidc_endpoint: 'https://idp-test.wikimedia.org/oidc'
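# For reference, these are the standard Apereo CAS endpoints: /login starts
# the CAS web flow, /serviceValidate is the CAS 2.0 ticket-validation
# endpoint, /samlValidate its SAML 1.1 counterpart, and /oidc the OpenID
# Connect issuer.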
ripeatlas_measurements:
eqiad: # https://atlas.ripe.net/probes/7513/anchor
ipv4:
- '96496702' # anchors pings
- '96496701' # anchors traceroutes
- '96503799' # probes pings
- '96503798' # probes traceroutes
ipv6:
- '96496705' # anchors pings
- '96496704' # anchors traceroutes
- '96503802' # probes pings
- '96503801' # probes traceroutes
codfw: # https://atlas.ripe.net/frames/probes/7038/
ipv4:
- '32390538' # anchors pings
- '32390537' # anchors traceroutes
- '32391305' # probes pings
- '32391304' # probes traceroutes
ipv6:
- '32390541' # anchors pings
- '32390540' # anchors traceroutes
- '32391312' # probes pings
- '32391311' # probes traceroutes
esams: # https://atlas.ripe.net/frames/probes/7261/
ipv4:
- '59935536' # anchors pings
- '59935535' # anchors traceroutes
- '59940409' # probes pings
- '59940407' # probes traceroutes
ipv6:
- '59935539' # anchors pings
- '59935538' # anchors traceroutes
- '59940414' # probes pings
- '59940413' # probes traceroutes
eqsin: # https://atlas.ripe.net/probes/7509/anchor
ipv4:
- '95145503' # anchors pings
- '95145502' # anchors traceroutes
- '95152299' # probes pings
- '95152298' # probes traceroutes
ipv6:
- '95145506' # anchors pings
- '95145505' # anchors traceroutes
- '95152319' # probes pings
- '95152318' # probes traceroutes
magru: # https://atlas.ripe.net/probes/7508/anchor
ipv4:
- '95133212' # anchors pings
- '95133211' # anchors traceroutes
- '95140314' # probes pings
- '95140313' # probes traceroutes
ipv6:
- '95133216' # anchors pings
- '95133215' # anchors traceroutes
- '95140317' # probes pings
- '95140316' # probes traceroutes
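# Each ID above is a RIPE Atlas measurement ID; results can be fetched from
# the public v2 API, e.g. (sketch, using an ID from the eqiad list):
#   https://atlas.ripe.net/api/v2/measurements/96496702/results/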
asns:
eqiad: 65001
codfw: 65002
esams: 65003
ulsfo: 65004
eqsin: 65005
drmrs: 65006
magru: 65007
eqord: 65020
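# All of the above fall in the 16-bit private-use ASN range (64512-65534,
# RFC 6996); eqord's 65020 presumably sits apart from the per-site 6500x
# block to keep the two groups visually distinct.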
swift_storage_drives:
- '/dev/sdc'
- '/dev/sdd'
- '/dev/sde'
- '/dev/sdf'
- '/dev/sdg'
- '/dev/sdh'
- '/dev/sdi'
- '/dev/sdj'
- '/dev/sdk'
- '/dev/sdl'
- '/dev/sdm'
- '/dev/sdn'
swift_aux_partitions:
- '/dev/sda3'
- '/dev/sda4'
- '/dev/sdb3'
- '/dev/sdb4'
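# A hedged sketch of how these device lists are typically consumed: each
# drive becomes a device in a Swift ring, added with something like the
# following (builder file, region/zone, IP, port and weight are illustrative
# assumptions):
#   swift-ring-builder object.builder add r1z1-10.0.0.1:6000/sdc 100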
mediabackup:
batchsize: 100
dblists_path: '/srv/mediawiki-config/dblists'
mw_db_config_file: '/etc/mediabackup/mw_db.ini'
db_config_file: '/etc/mediabackup/mediabackups_db.ini'
storage_path: '/srv/objectstorage'
storage_port: 9000
console_port: 9001
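# A minimal sketch of how a Puppet profile might read this hash (assuming a
# plain lookup; the actual consuming code is not shown here):
#   $mediabackup = lookup('mediabackup')
#   $batchsize   = $mediabackup['batchsize']  # => 100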
# A map of hostname -> data for infrastructure devices (i.e. devices not running Puppet).
# See the Wmflib::Infra::Devices type and related code for more information.
# Sites are separated/grouped by newline for easier navigation.
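# The general shape of each entry, inferred from the data below (ipv6,
# vrrp_peer, alarms and parents are optional):
#   <device-name>:
#     ipv4: <address>
#     ipv6: <address>
#     vrrp_peer: <fqdn>
#     alarms: true
#     parents: [<device>, ...]
#     site: <site>
#     role: cr|mr|msw|pfw|scs|l2sw|l3sw|atlas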
infra_devices:
cr1-eqiad:
ipv4: 208.80.154.196
ipv6: 2620:0:861:ffff::1
vrrp_peer: cr2-eqiad.wikimedia.org
alarms: true
site: eqiad
role: cr
cr2-eqiad:
ipv4: 208.80.154.197
ipv6: 2620:0:861:ffff::2
alarms: true
site: eqiad
role: cr
pfw1-eqiad:
ipv4: 208.80.154.219
site: eqiad
role: pfw
parents:
- cr1-eqiad
- cr2-eqiad
mr1-eqiad:
ipv4: '208.80.154.199'
ipv6: '2620:0:861:ffff::6'
parents:
- asw2-a-eqiad
site: eqiad
role: mr
msw1-eqiad:
ipv4: '10.65.0.10'
parents:
- mr1-eqiad
site: eqiad
role: msw
msw2-eqiad:
ipv4: '10.65.0.5'
parents:
- msw1-eqiad
site: eqiad
role: msw
asw2-a-eqiad:
ipv4: 10.65.0.21
site: eqiad
role: l2sw
asw2-b-eqiad:
ipv4: 10.65.0.25
site: eqiad
role: l2sw
fasw2-e15a-eqiad:
ipv4: 10.65.4.92
parents:
- msw2-eqiad
site: eqiad
role: l2sw
fasw2-e15b-eqiad:
ipv4: 10.65.4.93
parents:
- msw2-eqiad
site: eqiad
role: l2sw
fasw2-e16a-eqiad:
ipv4: 10.65.4.249
parents:
- msw2-eqiad
site: eqiad
role: l2sw
fasw2-e16b-eqiad:
ipv4: 10.65.4.250
parents:
- msw2-eqiad
site: eqiad
role: l2sw
scs-a8-eqiad:
ipv4: 10.65.0.11
parents:
- msw1-eqiad
site: eqiad
role: scs
scs-c1-eqiad:
ipv4: 10.65.0.22
parents:
- msw1-eqiad
site: eqiad
role: scs
scs-f8-eqiad:
ipv4: 10.65.0.4
parents:
- msw2-eqiad
site: eqiad
role: scs
cloudsw1-c8-eqiad:
ipv4: 10.64.146.252
ipv6: 2620:0:861:11b::252
site: eqiad
role: l3sw
cloudsw1-d5-eqiad:
ipv4: 10.64.146.253
ipv6: 2620:0:861:11b::253
site: eqiad
role: l3sw
cloudsw1-e4-eqiad:
ipv4: 10.64.146.254
ipv6: 2620:0:861:11b::254
site: eqiad
role: l3sw
cloudsw1-f4-eqiad:
ipv4: 10.64.146.255
ipv6: 2620:0:861:11b::255
site: eqiad
role: l3sw
ssw1-e1-eqiad:
ipv4: 10.64.146.1
ipv6: 2620:0:861:11b::1
site: eqiad
role: l3sw
lsw1-e1-eqiad:
ipv4: 10.64.146.3
ipv6: 2620:0:861:11b::3
site: eqiad
role: l3sw
lsw1-e2-eqiad:
ipv4: 10.64.146.4
ipv6: 2620:0:861:11b::4
site: eqiad
role: l3sw
lsw1-e3-eqiad:
ipv4: 10.64.146.5
ipv6: 2620:0:861:11b::5
site: eqiad
role: l3sw
lsw1-e5-eqiad:
ipv4: 10.64.146.6
ipv6: 2620:0:861:11b::6
site: eqiad
role: l3sw
lsw1-e6-eqiad:
ipv4: 10.64.146.10
ipv6: 2620:0:861:11b::a
site: eqiad
role: l3sw
lsw1-e7-eqiad:
ipv4: 10.64.146.11
ipv6: 2620:0:861:11b::b
site: eqiad
role: l3sw
lsw1-e8-eqiad:
ipv4: 10.64.146.16
ipv6: 2620:0:861:11b::10
site: eqiad
role: l3sw
ssw1-f1-eqiad:
ipv4: 10.64.146.2
ipv6: 2620:0:861:11b::2
site: eqiad
role: l3sw
lsw1-f1-eqiad:
ipv4: 10.64.146.7
ipv6: 2620:0:861:11b::7
site: eqiad
role: l3sw
lsw1-f2-eqiad:
ipv4: 10.64.146.8
ipv6: 2620:0:861:11b::8
site: eqiad
role: l3sw
lsw1-f3-eqiad:
ipv4: 10.64.146.9
ipv6: 2620:0:861:11b::9
site: eqiad
role: l3sw
lsw1-f5-eqiad:
ipv4: 10.64.146.12
ipv6: 2620:0:861:11b::c
site: eqiad
role: l3sw
lsw1-f6-eqiad:
ipv4: 10.64.146.13
ipv6: 2620:0:861:11b::d
site: eqiad
role: l3sw
lsw1-f7-eqiad:
ipv4: 10.64.146.15
ipv6: 2620:0:861:11b::f
site: eqiad
role: l3sw
lsw1-f8-eqiad:
ipv4: 10.64.146.17
ipv6: 2620:0:861:11b::11
site: eqiad
role: l3sw
ssw1-d1-eqiad:
ipv4: 10.64.168.1
ipv6: 2620:0:861:130::1
site: eqiad
role: l3sw
ssw1-d8-eqiad:
ipv4: 10.64.168.2
ipv6: 2620:0:861:130::2
site: eqiad
role: l3sw
lsw1-c2-eqiad:
ipv4: 10.64.168.3
ipv6: 2620:0:861:130::3
site: eqiad
role: l3sw
lsw1-c3-eqiad:
ipv4: 10.64.168.4
ipv6: 2620:0:861:130::4
site: eqiad
role: l3sw
lsw1-c4-eqiad:
ipv4: 10.64.168.5
ipv6: 2620:0:861:130::5
site: eqiad
role: l3sw
lsw1-c5-eqiad:
ipv4: 10.64.168.6
ipv6: 2620:0:861:130::6
site: eqiad
role: l3sw
lsw1-c6-eqiad:
ipv4: 10.64.168.7
ipv6: 2620:0:861:130::7
site: eqiad
role: l3sw
lsw1-c7-eqiad:
ipv4: 10.64.168.8
ipv6: 2620:0:861:130::8
site: eqiad
role: l3sw
lsw1-d1-eqiad:
ipv4: 10.64.168.9
ipv6: 2620:0:861:130::9
site: eqiad
role: l3sw
lsw1-d2-eqiad:
ipv4: 10.64.168.10
ipv6: 2620:0:861:130::a
site: eqiad
role: l3sw
lsw1-d3-eqiad:
ipv4: 10.64.168.11
ipv6: 2620:0:861:130::b
site: eqiad
role: l3sw
lsw1-d4-eqiad:
ipv4: 10.64.168.12
ipv6: 2620:0:861:130::c
site: eqiad
role: l3sw
lsw1-d6-eqiad:
ipv4: 10.64.168.13
ipv6: 2620:0:861:130::d
site: eqiad
role: l3sw
lsw1-d7-eqiad:
ipv4: 10.64.168.14
ipv6: 2620:0:861:130::e
site: eqiad
role: l3sw
lsw1-d8-eqiad:
ipv4: 10.64.168.15
ipv6: 2620:0:861:130::f
site: eqiad
role: l3sw
lswtest-d8-eqiad:
ipv4: 10.64.168.16
ipv6: 2620:0:861:130::10
site: eqiad
role: l3sw
cr2-eqord:
ipv4: 208.80.154.198
ipv6: 2620:0:861:ffff::5
site: eqord
role: cr
atlas1001:
ipv4: 208.80.155.69
ipv6: 2620:0:861:202:208:80:155:69
parents:
- asw2-b-eqiad
site: eqiad
role: atlas
cr1-codfw:
ipv4: 208.80.153.192
ipv6: 2620:0:860:ffff::1
alarms: true
site: codfw
role: cr
cr2-codfw:
ipv4: 208.80.153.193
ipv6: 2620:0:860:ffff::2
alarms: true
site: codfw
role: cr
pfw1-codfw:
ipv4: 208.80.153.197
site: codfw
role: pfw
mr1-codfw:
ipv4: 208.80.153.196
ipv6: 2620:0:860:ffff::6
parents:
- lsw1-a2-codfw
site: codfw
role: mr
msw1-codfw:
ipv4: 10.193.0.3
parents:
- mr1-codfw
site: codfw
role: msw
msw2-codfw:
ipv4: 10.193.1.89
parents:
- msw1-codfw
site: codfw
role: msw
ssw1-a1-codfw:
ipv4: 10.192.255.16
ipv6: 2620:0:860:13f::11
site: codfw
role: l3sw
ssw1-a8-codfw:
ipv4: 10.192.255.17
ipv6: 2620:0:860:13f::12
site: codfw
role: l3sw
lsw1-a2-codfw:
ipv4: 10.192.255.2
ipv6: 2620:0:860:13f::3
site: codfw
role: l3sw
lsw1-a3-codfw:
ipv4: 10.192.255.3
ipv6: 2620:0:860:13f::4
site: codfw
role: l3sw
lsw1-a4-codfw:
ipv4: 10.192.255.4
ipv6: 2620:0:860:13f::5
site: codfw
role: l3sw
lsw1-a5-codfw:
ipv4: 10.192.255.5
ipv6: 2620:0:860:13f::6
site: codfw
role: l3sw
lsw1-a6-codfw:
ipv4: 10.192.255.6
ipv6: 2620:0:860:13f::7
site: codfw
role: l3sw
lsw1-a7-codfw:
ipv4: 10.192.255.7
ipv6: 2620:0:860:13f::8
site: codfw
role: l3sw
lsw1-a8-codfw:
ipv4: 10.192.255.8
ipv6: 2620:0:860:13f::9
site: codfw
role: l3sw
lsw1-b2-codfw:
ipv4: 10.192.255.9
ipv6: 2620:0:860:13f::a
site: codfw
role: l3sw
lsw1-b3-codfw:
ipv4: 10.192.255.10
ipv6: 2620:0:860:13f::b
site: codfw
role: l3sw
lsw1-b4-codfw:
ipv4: 10.192.255.11
ipv6: 2620:0:860:13f::c
site: codfw
role: l3sw
lsw1-b5-codfw:
ipv4: 10.192.255.12
ipv6: 2620:0:860:13f::d
site: codfw
role: l3sw
lsw1-b6-codfw:
ipv4: 10.192.255.13
ipv6: 2620:0:860:13f::e
site: codfw
role: l3sw
lsw1-b7-codfw:
ipv4: 10.192.255.14
ipv6: 2620:0:860:13f::f
site: codfw
role: l3sw
lsw1-b8-codfw:
ipv4: 10.192.255.15
ipv6: 2620:0:860:13f::10
site: codfw
role: l3sw
lsw1-c1-codfw:
ipv4: 10.192.255.20
ipv6: 2620:0:860:13f::15
site: codfw
role: l3sw
lsw1-c2-codfw:
ipv4: 10.192.255.21
ipv6: 2620:0:860:13f::16
site: codfw
role: l3sw
lsw1-c3-codfw:
ipv4: 10.192.255.22
ipv6: 2620:0:860:13f::17
site: codfw
role: l3sw
lsw1-c4-codfw:
ipv4: 10.192.255.23
ipv6: 2620:0:860:13f::18
site: codfw
role: l3sw
lsw1-c5-codfw:
ipv4: 10.192.255.24
ipv6: 2620:0:860:13f::19
site: codfw
role: l3sw
lsw1-c6-codfw:
ipv4: 10.192.255.25
ipv6: 2620:0:860:13f::1a
site: codfw
role: l3sw
lsw1-c7-codfw:
ipv4: 10.192.255.26
ipv6: 2620:0:860:13f::1b
site: codfw
role: l3sw
ssw1-d1-codfw:
ipv4: 10.192.255.18
ipv6: 2620:0:860:13f::13
site: codfw
role: l3sw
ssw1-d8-codfw:
ipv4: 10.192.255.19
ipv6: 2620:0:860:13f::14
site: codfw
role: l3sw
lsw1-d1-codfw:
ipv4: 10.192.255.1
ipv6: 2620:0:860:13f::2
site: codfw
role: l3sw
lsw1-d2-codfw:
ipv4: 10.192.255.27
ipv6: 2620:0:860:13f::1c
site: codfw
role: l3sw
lsw1-d3-codfw:
ipv4: 10.192.255.28
ipv6: 2620:0:860:13f::1d
site: codfw
role: l3sw
lsw1-d4-codfw:
ipv4: 10.192.255.29
ipv6: 2620:0:860:13f::1e
site: codfw
role: l3sw
lsw1-d5-codfw:
ipv4: 10.192.255.30
ipv6: 2620:0:860:13f::1f
site: codfw
role: l3sw
lsw1-d6-codfw:
ipv4: 10.192.255.31
ipv6: 2620:0:860:13f::20
site: codfw
role: l3sw
lsw1-d7-codfw:
ipv4: 10.192.255.32
ipv6: 2620:0:860:13f::21
site: codfw
role: l3sw
lsw1-d8-codfw:
ipv4: 10.192.255.33
ipv6: 2620:0:860:13f::22
site: codfw
role: l3sw
ssw1-e1-codfw:
ipv4: 10.192.255.40
ipv6: 2620:0:860:13f::29
site: codfw
role: l3sw
ssw1-f1-codfw:
ipv4: 10.192.255.41
ipv6: 2620:0:860:13f::2a
site: codfw
role: l3sw
lsw1-e1-codfw:
ipv4: 10.192.255.42
ipv6: 2620:0:860:13f::2b
site: codfw
role: l3sw
lsw1-e2-codfw:
ipv4: 10.192.255.34
ipv6: 2620:0:860:13f::23
site: codfw
role: l3sw
lsw1-e3-codfw:
ipv4: 10.192.255.43
ipv6: 2620:0:860:13f::2c
site: codfw
role: l3sw
lsw1-e4-codfw:
ipv4: 10.192.255.35
ipv6: 2620:0:860:13f::24
site: codfw
role: l3sw
lsw1-e5-codfw:
ipv4: 10.192.255.36
ipv6: 2620:0:860:13f::25
site: codfw
role: l3sw
lsw1-f1-codfw:
ipv4: 10.192.255.44
ipv6: 2620:0:860:13f::2d
site: codfw
role: l3sw
lsw1-f2-codfw:
ipv4: 10.192.255.37
ipv6: 2620:0:860:13f::26
site: codfw
role: l3sw
lsw1-f3-codfw:
ipv4: 10.192.255.45
ipv6: 2620:0:860:13f::2e
site: codfw
role: l3sw
lsw1-f4-codfw:
ipv4: 10.192.255.38
ipv6: 2620:0:860:13f::27
site: codfw
role: l3sw
fasw2-c8a-codfw:
ipv4: 10.193.1.19
parents:
- msw1-codfw
site: codfw
role: l2sw
fasw2-c8b-codfw:
ipv4: 10.193.1.18
parents:
- msw1-codfw
site: codfw
role: l2sw
fasw1-f5a-codfw:
ipv4: 10.193.4.3
parents:
- msw2-codfw
site: codfw
role: l2sw
fasw1-f5b-codfw:
ipv4: 10.193.4.4
parents:
- msw2-codfw
site: codfw
role: l2sw
cloudsw1-b1-codfw:
ipv4: 10.192.255.255
ipv6: 2620:0:860:13f::1
site: codfw
role: l3sw
ripe-atlas-codfw:
ipv4: 208.80.152.244
ipv6: 2620:0:860:201:208:80:152:244
parents:
- lsw1-a2-codfw
site: codfw
role: atlas
scs-a1-codfw:
ipv4: 10.193.0.14
parents:
- msw1-codfw
site: codfw
role: scs
scs-c1-codfw:
ipv4: 10.193.0.15
parents:
- msw1-codfw
site: codfw
role: scs
scs-e3-codfw:
ipv4: 10.193.3.181
parents:
- msw2-codfw
site: codfw
role: scs
cr2-eqdfw:
ipv4: 208.80.153.198
ipv6: 2620:0:860:ffff::5
site: eqdfw
role: cr
cr1-esams:
ipv4: 185.15.59.128
ipv6: 2a02:ec80:300:ffff::1
site: esams
role: cr
cr2-esams:
ipv4: 185.15.59.129
ipv6: 2a02:ec80:300:ffff::2
site: esams
role: cr
mr1-esams:
ipv4: 185.15.59.130
ipv6: 2a02:ec80:300:ffff::3
site: esams
role: mr
scs-by27-esams:
ipv4: 10.80.128.6
parents:
- mr1-esams
site: esams
role: scs
asw1-by27-esams:
ipv4: 185.15.59.132
ipv6: 2a02:ec80:300:ffff::5
site: esams
role: l3sw
asw1-bw27-esams:
ipv4: 185.15.59.131
ipv6: 2a02:ec80:300:ffff::4
site: esams
role: l3sw
atlas3001:
ipv4: 185.15.59.74
ipv6: 2a02:ec80:300:202:185:15:59:74
site: esams
role: atlas
cr3-ulsfo:
ipv4: 198.35.26.128
ipv6: 2620:0:863:ffff::1
vrrp_peer: cr4-ulsfo.wikimedia.org
site: ulsfo
role: cr
cr4-ulsfo:
ipv4: 198.35.26.129
ipv6: 2620:0:863:ffff::2
site: ulsfo
role: cr
mr1-ulsfo:
ipv4: '198.35.26.130'
ipv6: '2620:0:863:ffff::3'
parents:
- asw2-ulsfo
site: ulsfo
role: mr
asw2-ulsfo:
ipv4: 10.128.128.7
site: ulsfo
role: l2sw
scs-ulsfo:
ipv4: 10.128.128.11
parents:
- mr1-ulsfo
site: ulsfo
role: scs
cr2-eqsin:
ipv4: 103.102.166.130
ipv6: 2001:df2:e500:ffff::3
site: eqsin
role: cr
cr3-eqsin:
ipv4: 103.102.166.131
ipv6: 2001:df2:e500:ffff::4
vrrp_peer: cr2-eqsin.wikimedia.org
site: eqsin
role: cr
mr1-eqsin:
ipv4: '103.102.166.128'
ipv6: '2001:df2:e500:ffff::1'
parents:
- asw1-eqsin
site: eqsin
role: mr
asw1-eqsin:
ipv4: 10.132.128.4
site: eqsin
role: l2sw
scs-eqsin:
ipv4: 10.132.128.5
parents:
- mr1-eqsin
site: eqsin
role: scs
atlas5001:
ipv4: 103.102.166.74
ipv6: 2001:df2:e500:202:103:102:166:74
site: eqsin
role: atlas
cr1-drmrs:
ipv4: 185.15.58.128
ipv6: 2a02:ec80:600:ffff::1
site: drmrs
role: cr
cr2-drmrs:
ipv4: 185.15.58.129
ipv6: 2a02:ec80:600:ffff::2
site: drmrs
role: cr
mr1-drmrs:
ipv4: '185.15.58.130'
ipv6: '2a02:ec80:600:ffff::3'
site: drmrs
role: mr
scs-drmrs:
ipv4: 10.136.128.5
parents:
- mr1-drmrs
site: drmrs
role: scs
asw1-b12-drmrs:
ipv4: 185.15.58.131
ipv6: 2a02:ec80:600:ffff::4
site: drmrs
role: l3sw
asw1-b13-drmrs:
ipv4: 185.15.58.132
ipv6: 2a02:ec80:600:ffff::5
site: drmrs
role: l3sw
cr1-magru:
ipv4: 195.200.68.128
ipv6: 2a02:ec80:700:ffff::1
site: magru
role: cr
cr2-magru:
ipv4: 195.200.68.129
ipv6: 2a02:ec80:700:ffff::2
site: magru
role: cr
mr1-magru:
ipv4: 195.200.68.132
ipv6: 2a02:ec80:700:ffff::5
site: magru
role: mr
scs-magru:
ipv4: 10.140.128.6
parents:
- mr1-magru
site: magru
role: scs
asw1-b3-magru:
ipv4: 195.200.68.130
ipv6: 2a02:ec80:700:ffff::3
site: magru
role: l3sw
asw1-b4-magru:
ipv4: 195.200.68.131
ipv6: 2a02:ec80:700:ffff::4
site: magru
role: l3sw
atlas7001:
ipv4: 195.200.68.66
ipv6: 2a02:ec80:700:201:195:200:68:66
site: magru
role: atlas
# A map of hostname -> metadata for hosts the Prometheus blackbox exporter pings.
# The list was ported from Smokeping and has been manually maintained since Jul
# 2022; going forward it should come from Netbox instead.
# See the Wmflib::Infra::Devices type and related code for more information.
# Sites are separated/grouped by newline for easier navigation.
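# A hedged sketch of how an entry below might surface as a blackbox exporter
# ICMP target (the static_config shape is illustrative; the actual rendering
# code is not shown here):
#   - targets: ['frbast-eqiad.wikimedia.org']
#     labels: {realm: frack, site: eqiad, rack: C1}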
blackbox_smoke_hosts:
frbast-eqiad.wikimedia.org:
realm: frack
site: eqiad
rack: C1
payments-listener-eqiad.wikimedia.org:
realm: frack
site: eqiad
rack: C1
alert1002.wikimedia.org:
realm: production
site: eqiad
rack: C7
gerrit1003.wikimedia.org:
realm: production
site: eqiad
rack: B5
frbast-codfw.wikimedia.org:
realm: frack
site: codfw
rack: C8
payments-listener-codfw.wikimedia.org:
realm: frack
site: codfw
rack: C8
gitlab2003.wikimedia.org:
realm: production
site: codfw
rack: B5
alert2002.wikimedia.org:
realm: production
site: codfw
rack: B7
db2216.codfw.wmnet:
realm: production
site: codfw
rack: D5
ganeti4005.ulsfo.wmnet:
realm: production
site: ulsfo
rack: "103.02.22"
ganeti4008.ulsfo.wmnet:
realm: production
site: ulsfo
rack: "103.02.23"
ganeti5007.eqsin.wmnet:
realm: production
site: eqsin
rack: "603"
ganeti5004.eqsin.wmnet:
realm: production
site: eqsin
rack: "604"
dns6001.wikimedia.org:
realm: production
site: drmrs
rack: B12
ganeti6002.drmrs.wmnet:
realm: production
site: drmrs
rack: B13
# List of FQDNs for the Prometheus nodes monitoring all of Cloud VPS.
# These are for eqiad1, but are listed here so that they also apply to Pontoon.
metricsinfra_prometheus_nodes:
- metricsinfra-prometheus-2.metricsinfra.eqiad1.wikimedia.cloud
- metricsinfra-prometheus-3.metricsinfra.eqiad1.wikimedia.cloud
wikimedia_domains:
- wikipedia.org
- wikimedia.org
- wikibooks.org
- wikinews.org
- wikiquote.org
- wikisource.org
- wikiversity.org
- wikivoyage.org
- wikidata.org
- wikimediafoundation.org
- wikiworkshop.org
- wikifunctions.org
- wiktionary.org
- mediawiki.org
- wmfusercontent.org
- w.wiki
wmcs_domains:
- wmflabs.org
- toolforge.org
- wmcloud.org
# List of source IP addresses from which HAProxy routes healthcheck
# requests to a dedicated backend.
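# A hedged sketch of the HAProxy side (ACL and backend names are illustrative
# assumptions, not the production config):
#   acl healthcheck_src src 10.64.0.136 10.64.16.60
#   use_backend healthcheck if healthcheck_src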
haproxy_allowed_healthcheck_sources:
- 10.64.0.136 # lvs1016
- 10.64.16.60 # lvs1018
- 10.64.158.19 # lvs1019
- 10.64.166.19 # lvs1019
- 10.64.133.19 # lvs1019 private1-c2-eqiad
- 10.64.141.19 # lvs1019 private1-c3-eqiad
- 10.64.169.19 # lvs1019 private1-c4-eqiad
- 10.64.171.19 # lvs1019 private1-c5-eqiad
- 10.64.173.19 # lvs1019 private1-c6-eqiad
- 10.64.175.19 # lvs1019 private1-c7-eqiad
- 10.64.177.19 # lvs1019 private1-d1-eqiad
- 10.64.179.19 # lvs1019 private1-d2-eqiad
- 10.64.181.19 # lvs1019 private1-d3-eqiad
- 10.64.183.19 # lvs1019 private1-d4-eqiad
- 10.64.185.19 # lvs1019 private1-d6-eqiad
- 10.64.187.19 # lvs1019 private1-d7-eqiad
- 10.64.189.19 # lvs1019 private1-d8-eqiad
- 10.64.48.72 # lvs1020
- 10.64.37.17 # lvs1020
- 10.64.1.17 # lvs1020
- 10.64.17.17 # lvs1020
- 10.64.33.17 # lvs1020
- 10.64.130.20 # lvs1020
- 10.64.131.20 # lvs1020
- 10.64.132.20 # lvs1020
- 10.64.134.20 # lvs1020
- 10.64.135.20 # lvs1020
- 10.64.136.20 # lvs1020
- 10.64.158.20 # lvs1020
- 10.64.166.20 # lvs1020
- 10.64.133.20 # lvs1020 private1-c2-eqiad
- 10.64.141.20 # lvs1020 private1-c3-eqiad
- 10.64.169.20 # lvs1020 private1-c4-eqiad
- 10.64.171.20 # lvs1020 private1-c5-eqiad
- 10.64.173.20 # lvs1020 private1-c6-eqiad
- 10.64.175.20 # lvs1020 private1-c7-eqiad
- 10.64.177.20 # lvs1020 private1-d1-eqiad
- 10.64.179.20 # lvs1020 private1-d2-eqiad
- 10.64.181.20 # lvs1020 private1-d3-eqiad
- 10.64.183.20 # lvs1020 private1-d4-eqiad
- 10.64.185.20 # lvs1020 private1-d6-eqiad
- 10.64.187.20 # lvs1020 private1-d7-eqiad
- 10.64.189.20 # lvs1020 private1-d8-eqiad
- 2620:0:861:101::/64 # eqiad
- 2620:0:861:102::/64 # eqiad
- 2620:0:861:103::/64 # eqiad
- 2620:0:861:107::/64 # eqiad
- 2620:0:861:109::/64 # eqiad
- 2620:0:861:10a::/64 # eqiad
- 2620:0:861:10b::/64 # eqiad
- 2620:0:861:10d::/64 # eqiad
- 2620:0:861:10e::/64 # eqiad
- 2620:0:861:10f::/64 # eqiad
- 2620:0:861:10c::/64 # private1-c2-eqiad
- 2620:0:861:113::/64 # private1-c3-eqiad
- 2620:0:861:119::/64 # private1-c4-eqiad
- 2620:0:861:131::/64 # private1-c5-eqiad
- 2620:0:861:133::/64 # private1-c6-eqiad
- 2620:0:861:135::/64 # private1-c7-eqiad
- 2620:0:861:137::/64 # private1-d1-eqiad
- 2620:0:861:139::/64 # private1-d2-eqiad
- 2620:0:861:13b::/64 # private1-d3-eqiad
- 2620:0:861:13d::/64 # private1-d4-eqiad
- 2620:0:861:13f::/64 # private1-d6-eqiad
- 2620:0:861:142::/64 # private1-d7-eqiad
- 2620:0:861:144::/64 # private1-d8-eqiad
- 10.192.23.8 # lvs2011
- 10.192.0.29 # lvs2011
- 10.192.17.8 # lvs2011
- 10.192.33.8 # lvs2011
- 10.192.49.8 # lvs2011
- 10.192.23.2 # lvs2011
- 10.192.5.2 # lvs2011
- 10.192.6.2 # lvs2011
- 10.192.7.2 # lvs2011
- 10.192.8.2 # lvs2011
- 10.192.9.2 # lvs2011
- 10.192.10.2 # lvs2011
- 10.192.11.2 # lvs2011
- 10.192.12.2 # lvs2011
- 10.192.13.2 # lvs2011
- 10.192.14.2 # lvs2011
- 10.192.15.2 # lvs2011
- 10.192.21.2 # lvs2011
- 10.192.22.2 # lvs2011
- 10.192.4.2 # lvs2011
- 10.192.26.2 # lvs2011
- 10.192.27.2 # lvs2011
- 10.192.28.2 # lvs2011
- 10.192.29.2 # lvs2011
- 10.192.30.2 # lvs2011
- 10.192.31.2 # lvs2011
- 10.192.36.2 # lvs2011
- 10.192.37.2 # lvs2011
- 10.192.38.2 # lvs2011
- 10.192.39.2 # lvs2011
- 10.192.40.2 # lvs2011
- 10.192.41.2 # lvs2011
- 10.192.42.2 # lvs2011
- 10.192.43.2 # lvs2011
- 10.192.11.8 # lvs2012
- 10.192.16.140 # lvs2012
- 10.192.1.8 # lvs2012
- 10.192.33.9 # lvs2012
- 10.192.49.9 # lvs2012
- 10.192.23.3 # lvs2012
- 10.192.5.3 # lvs2012
- 10.192.6.3 # lvs2012
- 10.192.7.3 # lvs2012
- 10.192.8.3 # lvs2012
- 10.192.9.3 # lvs2012
- 10.192.10.3 # lvs2012
- 10.192.11.3 # lvs2012
- 10.192.12.3 # lvs2012
- 10.192.13.3 # lvs2012
- 10.192.14.3 # lvs2012
- 10.192.15.3 # lvs2012
- 10.192.21.3 # lvs2012
- 10.192.22.3 # lvs2012
- 10.192.4.3 # lvs2012
- 10.192.26.3 # lvs2012
- 10.192.27.3 # lvs2012
- 10.192.28.3 # lvs2012
- 10.192.29.3 # lvs2012
- 10.192.30.3 # lvs2012
- 10.192.31.3 # lvs2012
- 10.192.36.3 # lvs2012
- 10.192.37.3 # lvs2012
- 10.192.38.3 # lvs2012
- 10.192.39.4 # lvs2012
- 10.192.40.3 # lvs2012
- 10.192.41.3 # lvs2012
- 10.192.42.3 # lvs2012
- 10.192.43.3 # lvs2012
- 10.192.32.14 # lvs2013
- 10.192.1.9 # lvs2013
- 10.192.17.9 # lvs2013
- 10.192.49.10 # lvs2013
- 10.192.23.4 # lvs2013
- 10.192.5.4 # lvs2013
- 10.192.6.4 # lvs2013
- 10.192.7.4 # lvs2013
- 10.192.8.4 # lvs2013
- 10.192.9.4 # lvs2013
- 10.192.10.4 # lvs2013
- 10.192.11.4 # lvs2013
- 10.192.12.4 # lvs2013
- 10.192.13.4 # lvs2013
- 10.192.14.4 # lvs2013
- 10.192.15.4 # lvs2013
- 10.192.21.4 # lvs2013
- 10.192.22.4 # lvs2013
- 10.192.4.5 # lvs2013
- 10.192.26.5 # lvs2013
- 10.192.27.5 # lvs2013
- 10.192.28.5 # lvs2013
- 10.192.29.5 # lvs2013
- 10.192.30.5 # lvs2013
- 10.192.31.5 # lvs2013
- 10.192.36.5 # lvs2013
- 10.192.37.5 # lvs2013
- 10.192.38.5 # lvs2013
- 10.192.39.6 # lvs2013
- 10.192.40.5 # lvs2013
- 10.192.41.5 # lvs2013
- 10.192.42.5 # lvs2013
- 10.192.43.5 # lvs2013
- 10.192.48.213 # lvs2014
- 10.192.1.13 # lvs2014
- 10.192.17.10 # lvs2014
- 10.192.33.10 # lvs2014
- 10.192.23.5 # lvs2014
- 10.192.5.8 # lvs2014
- 10.192.6.5 # lvs2014
- 10.192.7.5 # lvs2014
- 10.192.8.5 # lvs2014
- 10.192.9.5 # lvs2014
- 10.192.10.5 # lvs2014
- 10.192.11.5 # lvs2014
- 10.192.12.5 # lvs2014
- 10.192.13.5 # lvs2014
- 10.192.14.5 # lvs2014
- 10.192.15.5 # lvs2014
- 10.192.21.5 # lvs2014
- 10.192.22.5 # lvs2014
- 10.192.4.5 # lvs2014
- 10.192.26.5 # lvs2014
- 10.192.27.5 # lvs2014
- 10.192.28.5 # lvs2014
- 10.192.29.5 # lvs2014
- 10.192.30.5 # lvs2014
- 10.192.31.5 # lvs2014
- 10.192.36.5 # lvs2014
- 10.192.37.5 # lvs2014
- 10.192.38.5 # lvs2014
- 10.192.39.6 # lvs2014
- 10.192.40.5 # lvs2014
- 10.192.41.5 # lvs2014
- 10.192.42.5 # lvs2014
- 10.192.43.5 # lvs2014
- 2620:0:860:101::/64 # codfw
- 2620:0:860:102::/64 # codfw
- 2620:0:860:103::/64 # codfw
- 2620:0:860:104::/64 # codfw
- 10.80.0.3 # lvs3008
- 10.80.1.8 # lvs3008
- 10.80.1.14 # lvs3009
- 10.80.0.9 # lvs3009
- 10.80.0.2 # lvs3010
- 10.80.1.10 # lvs3010
- 2a02:ec80:300:101::/64 # esams
- 2a02:ec80:300:102::/64 # esams
- 10.128.0.18 # lvs4008
- 10.128.0.9 # lvs4009
- 10.128.0.11 # lvs4010
- 2620:0:863:101::/64 # ulsfo
- 10.132.0.39 # lvs5004
- 10.132.0.6 # lvs5005
- 10.132.0.7 # lvs5006
- 2001:df2:e500:101::/64 # eqsin
- 10.136.0.16 # lvs6001
- 10.136.1.19 # lvs6001
- 10.136.1.15 # lvs6002
- 10.136.0.19 # lvs6002
- 10.136.0.17 # lvs6003
- 10.136.1.20 # lvs6003
- 2a02:ec80:600:101::/64 # drmrs
- 2a02:ec80:600:102::/64 # drmrs
- 10.140.0.13 # lvs7001
- 10.140.1.2 # lvs7001
- 10.140.1.14 # lvs7002
- 10.140.0.2 # lvs7002
- 10.140.0.14 # lvs7003
- 10.140.1.3 # lvs7003
- 2a02:ec80:700:101::/64 # magru: private1-b3-magru
- 2a02:ec80:700:102::/64 # magru: private1-b4-magru
private_wikis:
- 'advisorswiki'
- 'arbcom_cswiki'
- 'arbcom_dewiki'
- 'arbcom_enwiki'
- 'arbcom_fiwiki'
- 'arbcom_itwiki'
- 'arbcom_nlwiki'
- 'arbcom_plwiki'
- 'arbcom_ruwiki'
- 'arbcom_zhwiki'
- 'auditcomwiki'
- 'boardgovcomwiki'
- 'boardwiki'
- 'chairwiki'
- 'chapcomwiki'
- 'checkuserwiki'
- 'collabwiki'
- 'ecwikimedia'
- 'electcomwiki'
- 'execwiki'
- 'fdcwiki'
- 'grantswiki'
- 'id_internalwikimedia'
- 'iegcomwiki'
- 'ilwikimedia'
- 'internalwiki'
- 'legalteamwiki'
- 'movementroleswiki'
- 'noboard_chapterswikimedia'
- 'officewiki'
- 'ombudsmenwiki'
- 'otrs_wikiwiki'
- 'projectcomwiki'
- 'searchcomwiki'
- 'spcomwiki'
- 'stewardwiki'
- 'sysop_itwiki'
- 'sysop_plwiki'
- 'techconductwiki'
- 'transitionteamwiki'
- 'u4cwiki'
- 'wg_enwiki'
- 'wikimaniateamwiki'
- 'zerowiki'
# Use the table catalog when adding new private tables!
private_tables:
- 'cur'
- 'hidden'
# Needed to scrape the maintain_dbusers service; only one maintain_dbusers
# primary is needed across all DCs.
wmcs_maintain_dbusers_primary: cloudcontrol1007.eqiad.wmnet
# Controls the maximum number of series per request for both thanos-sidecar
# (role::prometheus, role::prometheus::pop) and thanos-store (role::titan).
thanos_limits_request_series: 256000
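# This is presumably wired to Thanos's --store.limits.request-series flag on
# those components (an assumption about the consuming code, not confirmed
# here).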