code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def get_authenticating_services(soa_dir: str = DEFAULT_SOA_DIR) -> Set[str]:
    """Return the set of services enrolled in authenticated traffic.

    :param soa_dir: path to the SOA configurations directory
    :returns: service names listed under ``services`` in authenticating.yaml
    """
    conf_path = os.path.join(soa_dir, "authenticating.yaml")
    contents = service_configuration_lib.read_yaml_file(conf_path)
    return set(contents.get("services", []))
|
Load list of services participating in authenticated traffic
|
get_authenticating_services
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def add_volumes_for_authenticating_services(
    service_name: str,
    config_volumes: List[ProjectedSAVolume],
    soa_dir: str = DEFAULT_SOA_DIR,
) -> List[ProjectedSAVolume]:
    """Prepend the projected service account token volume for services that
    participate in authenticated traffic.

    The input list is never mutated: when a change is needed, a new list is
    returned with the token volume first.
    :param str service_name: name of the service
    :param List[ProjectedSAVolume] config_volumes: existing projected volumes from service config
    :param str soa_dir: path to SOA configurations directory
    :return: updated list of projected service account volumes
    """
    token_config = load_system_paasta_config().get_service_auth_token_volume_config()
    # Nothing to add if there is no token volume configured, the service is
    # not enrolled, or the volume is already present.
    if not token_config:
        return config_volumes
    if service_name not in get_authenticating_services(soa_dir):
        return config_volumes
    if any(existing == token_config for existing in config_volumes):
        return config_volumes
    return [token_config, *config_volumes]
|
Add projected service account volume to the list of volumes if service
participates in authenticated traffic. In case of changes, a new list is returned,
no updates in-place.
:param str service_name: name of the service
:param List[ProjectedSAVolume] config_volumes: existing projected volumes from service config
:param str soa_dir: path to SOA configurations directory
:return: updated list of projected service account volumes
|
add_volumes_for_authenticating_services
|
python
|
Yelp/paasta
|
paasta_tools/kubernetes_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/kubernetes_tools.py
|
Apache-2.0
|
def get_healthcheck_mode(self) -> str:
    """Return the healthcheck mode, defaulting to the service's own mode.

    Users may configure ``healthcheck_mode`` independently of ``mode``; when
    it is absent (or falsy), the service mode is used instead.
    """
    return self.get("healthcheck_mode", None) or self.get_mode()
|
Get the healthcheck mode for the service. In most cases, this will match the mode
of the service, but we do provide the opportunity for users to specify both. Default to the mode
if no healthcheck_mode is specified.
|
get_healthcheck_mode
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_mode(self) -> str:
    """Return the mode the service runs in, validating that it is supported.

    When no mode is configured, smartstack membership (a configured
    proxy_port) implies "http"; otherwise the mode is None.
    :raises InvalidSmartstackMode: for any configured mode outside
        http/tcp/https
    """
    configured = self.get("mode", None)
    if configured is None:
        return "http" if self.is_in_smartstack() else None
    if configured in ("http", "tcp", "https"):
        return configured
    raise InvalidSmartstackMode("Unknown mode: %s" % configured)
|
Get the mode that the service runs in and check that we support it.
If the mode is not specified, we check whether the service uses smartstack
in order to determine the appropriate default value. If proxy_port is specified
in the config, the service uses smartstack, and we can thus safely assume its mode is http.
If the mode is not defined and the service does not use smartstack, we set the mode to None.
|
get_mode
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_longest_timeout_ms(self) -> int:
    """Return the longest time (ms) a connection to this service may stay open.

    Considers the server timeout as well as every per-endpoint override.
    """
    candidates = [self.get_timeout_server_ms()]
    candidates.extend(self.get("endpoint_timeouts", {}).values())
    return max(candidates)
|
Calculate the longest amount of time a connection to this service might stay open.
|
get_longest_timeout_ms
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_drain_method(self, service_namespace_config: ServiceNamespaceConfig) -> str:
    """Return the drain method for this instance.

    Smartstack services default to "hacheck" draining; everything else
    defaults to "noop".
    :param service_namespace_config: the service's namespace configuration
    :returns: the drain method from the config, or the default
    """
    fallback = "hacheck" if service_namespace_config.is_in_smartstack() else "noop"
    return self.config_dict.get("drain_method", fallback)
|
Get the drain method specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain method specified in the config, or 'noop' if not specified
|
get_drain_method
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_drain_method_params(
    self, service_namespace_config: ServiceNamespaceConfig
) -> Dict:
    """Return the drain method parameters for this instance.

    Smartstack services default to {"delay": 60}; everything else to {}.
    :param service_namespace_config: the service's namespace configuration
    :returns: the configured drain_method_params dict, or the default
    """
    fallback: Dict = (
        {"delay": 60} if service_namespace_config.is_in_smartstack() else {}
    )
    return self.config_dict.get("drain_method_params", fallback)
|
Get the drain method parameters specified in the service's configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain_method_params dictionary specified in the config, or {} if not specified
|
get_drain_method_params
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_instances(self, with_limit: bool = True) -> int:
    """Return the instance count for a service, regardless of whether the
    user has requested it to be started or stopped.

    When autoscaling is enabled, the autoscaled count wins (clamped via
    limit_instance_count unless with_limit is False), falling back to
    max_instances when no autoscaled value is available. Otherwise the
    configured 'instances' value (default 1) is used.
    """
    if not self.is_autoscaling_enabled():
        configured = self.config_dict.get("instances", 1)
        log.debug("Autoscaling not enabled, returning %d instances" % configured)
        return configured
    autoscaled = self.get_autoscaled_instances()
    if autoscaled is None:
        return self.get_max_instances()
    return self.limit_instance_count(autoscaled) if with_limit else autoscaled
|
Gets the number of instances for a service, ignoring whether the user has requested
the service to be started or stopped
|
get_instances
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_desired_instances(self) -> int:
    """Return how many instances should actually be running.

    Defers to get_instances(), which reconciles the zookeeper value against
    the configured min_instances/max_instances bounds; anything other than a
    'start' desired state yields 0.
    :returns: the desired instance count, 0 if desired_state is not 'start'
    """
    if self.get_desired_state() != "start":
        log.debug("Instance is set to stop. Returning '0' instances")
        return 0
    return self.get_instances()
|
Get the number of instances specified in zookeeper or the service's configuration.
If the number of instances in zookeeper is less than min_instances, returns min_instances.
If the number of instances in zookeeper is greater than max_instances, returns max_instances.
Defaults to 0 if not specified in the config.
:returns: The number of instances specified in the config, 0 if not
specified or if desired_state is not 'start'.
|
get_desired_instances
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_healthcheck_for_instance(
    service: str,
    instance: str,
    service_manifest: LongRunningServiceConfig,
    random_port: int,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> Tuple[Optional[str], Optional[str]]:
    """Build the (mode, healthcheck_command) pair for a service instance.

    Returns (None, None) when the instance has no recognized healthcheck
    mode. http/https produce a URL against this host's FQDN, tcp a bare
    host:port URL, and cmd the configured healthcheck command.
    """
    smartstack_config = load_service_namespace_config(
        service=service,
        namespace=service_manifest.get_nerve_namespace(),
        soa_dir=soa_dir,
    )
    mode = service_manifest.get_healthcheck_mode(smartstack_config)
    hostname = socket.getfqdn()
    if mode in ("http", "https"):
        uri = service_manifest.get_healthcheck_uri(smartstack_config)
        command = "%s://%s:%d%s" % (mode, hostname, random_port, uri)
    elif mode == "tcp":
        command = "%s://%s:%d" % (mode, hostname, random_port)
    elif mode == "cmd":
        command = service_manifest.get_healthcheck_cmd()
    else:
        mode, command = None, None
    return (mode, command)
|
Returns healthcheck for a given service instance in the form of a tuple (mode, healthcheck_command)
or (None, None) if no healthcheck
|
get_healthcheck_for_instance
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def load_service_namespace_config(
    service: str, namespace: str, soa_dir: str = DEFAULT_SOA_DIR
) -> ServiceNamespaceConfig:
    """Read the configuration for one namespace of a service, strictly.

    Only a fixed set of keys is carried over from the smartstack section:
    - proxy_port: the proxy port defined for the given namespace
    - healthcheck_mode: the mode for the healthcheck (http or tcp)
    - healthcheck_port: An alternate port to use for health checking
    - healthcheck_uri: URI target for healthchecking
    - healthcheck_timeout_s: healthcheck timeout in seconds
    - healthcheck_body_expect: an expected string in healthcheck response body
    - updown_timeout_s: updown_service timeout in seconds
    - timeout_connect_ms: proxy frontend timeout in milliseconds
    - timeout_server_ms: proxy server backend timeout in milliseconds
    - retries: the number of retries on a proxy backend
    - mode: the mode the service is run in (http or tcp)
    - routes: a list of tuples of (source, destination)
    - discover: the scope at which to discover services e.g. 'habitat'
    - advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
    - extra_advertise: a list of tuples of (source, destination)
      e.g. [('region:dc6-prod', 'region:useast1-prod')]
    - extra_healthcheck_headers: a dict of HTTP headers that must
      be supplied when health checking. E.g. { 'Host': 'example.com' }
    - lb_policy: Envoy load balancer policies. E.g. "ROUND_ROBIN"
    :param service: The service name
    :param namespace: The namespace to read
    :param soa_dir: The SOA config directory to read from
    :returns: A dict of the above keys, if they were defined
    """
    raw_namespace_config = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info="smartstack",
        soa_dir=soa_dir,
        deepcopy=False,
    ).get(namespace, {})
    result = ServiceNamespaceConfig()
    # Keys absent from the config file must stay absent from the result (so
    # no blanket copy, and no .get() with defaults); the smartstack section
    # may also carry unrelated keys we deliberately skip.
    allowed_keys = frozenset(
        (
            "healthcheck_mode",
            "healthcheck_uri",
            "healthcheck_port",
            "healthcheck_timeout_s",
            "healthcheck_body_expect",
            "updown_timeout_s",
            "proxy_port",
            "timeout_connect_ms",
            "timeout_server_ms",
            "retries",
            "mode",
            "discover",
            "advertise",
            "extra_healthcheck_headers",
            "lb_policy",
            "endpoint_timeouts",
        )
    )
    for key, value in raw_namespace_config.items():
        if key not in allowed_keys:
            continue
        result[key] = value
    # 'mode' is checked by other paasta_tools code after loading, so it is
    # always materialized here -- even when the appropriate default is None.
    result["mode"] = result.get_mode()
    if "routes" in raw_namespace_config:
        routes = []
        for route in raw_namespace_config["routes"]:
            for destination in route["destinations"]:
                routes.append((route["source"], destination))
        result["routes"] = routes
    if "extra_advertise" in raw_namespace_config:
        advertise_pairs = []
        for source in raw_namespace_config["extra_advertise"]:
            for destination in raw_namespace_config["extra_advertise"][source]:
                advertise_pairs.append((source, destination))
        result["extra_advertise"] = advertise_pairs
    return result
|
Attempt to read the configuration for a service's namespace in a more strict fashion.
Retrieves the following keys:
- proxy_port: the proxy port defined for the given namespace
- healthcheck_mode: the mode for the healthcheck (http or tcp)
- healthcheck_port: An alternate port to use for health checking
- healthcheck_uri: URI target for healthchecking
- healthcheck_timeout_s: healthcheck timeout in seconds
- healthcheck_body_expect: an expected string in healthcheck response body
- updown_timeout_s: updown_service timeout in seconds
- timeout_connect_ms: proxy frontend timeout in milliseconds
- timeout_server_ms: proxy server backend timeout in milliseconds
- retries: the number of retries on a proxy backend
- mode: the mode the service is run in (http or tcp)
- routes: a list of tuples of (source, destination)
- discover: the scope at which to discover services e.g. 'habitat'
- advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
- extra_advertise: a list of tuples of (source, destination)
e.g. [('region:dc6-prod', 'region:useast1-prod')]
- extra_healthcheck_headers: a dict of HTTP headers that must
be supplied when health checking. E.g. { 'Host': 'example.com' }
- lb_policy: Envoy load balancer policies. E.g. "ROUND_ROBIN"
:param service: The service name
:param namespace: The namespace to read
:param soa_dir: The SOA config directory to read from
:returns: A dict of the above keys, if they were defined
|
load_service_namespace_config
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_proxy_port_for_instance(
    service_config: LongRunningServiceConfig,
) -> Optional[int]:
    """Look up the proxy_port of a service instance's first registration.

    The first registration determines the (service, namespace) pair, and the
    proxy_port is then read from that namespace's smartstack configuration.
    :param service_config: The instance of the services LongRunningServiceConfig
    :returns: The proxy_port for the service instance, or None if not defined
    """
    first_registration = service_config.get_registrations()[0]
    service, namespace = decompose_job_id(first_registration)[:2]
    namespace_config = load_service_namespace_config(
        service=service, namespace=namespace, soa_dir=service_config.soa_dir
    )
    return namespace_config.get("proxy_port")
|
Get the proxy_port defined in the first namespace configuration for a
service instance.
This means that the namespace first has to be loaded from the service instance's
configuration, and then the proxy_port has to loaded from the smartstack configuration
for that namespace.
:param service_config: The instance of the services LongRunningServiceConfig
:returns: The proxy_port for the service instance, or None if not defined
|
get_proxy_port_for_instance
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def host_passes_blacklist(
    host_attributes: Mapping[str, str], blacklist: DeployBlacklist
) -> bool:
    """Check a host's attributes against a deploy blacklist.

    :param host_attributes: a single host's attributes dict
    :param blacklist: A list of lists like [["location_type", "location"], ["foo", "bar"]]
    :returns: True if no blacklist entry matches the host; a malformed
        blacklist is treated conservatively as "does not pass"
    """
    try:
        return not any(
            host_attributes.get(location_type) == location
            for location_type, location in blacklist
        )
    except ValueError as e:
        # A malformed entry (wrong tuple arity) surfaces as ValueError.
        log.error(f"Errors processing the following blacklist: {blacklist}")
        log.error("I will assume the host does not pass\nError was: %s" % e)
        return False
|
:param host: A single host attributes dict
:param blacklist: A list of lists like [["location_type", "location"], ["foo", "bar"]]
:returns: boolean, True if the host gets passed the blacklist
|
host_passes_blacklist
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def host_passes_whitelist(
    host_attributes: Mapping[str, str], whitelist: DeployWhitelist
) -> bool:
    """Check a host's attributes against a deploy whitelist.

    :param host_attributes: a single host's attributes dict
    :param whitelist: A 2 item list like ["location_type", ["location1", 'location2']]
    :returns: True when whitelisting is disabled (empty/None whitelist) or
        the host's location is among the allowed ones
    """
    # An absent or empty whitelist disables whitelisting entirely.
    if whitelist is None or len(whitelist) == 0:
        return True
    try:
        location_type, allowed_locations = whitelist
        return host_attributes.get(location_type) in allowed_locations
    except ValueError as e:
        # A malformed whitelist (wrong arity) surfaces as ValueError.
        log.error(f"Errors processing the following whitelist: {whitelist}")
        log.error("I will assume the host does not pass\nError was: %s" % e)
        return False
|
:param host: A single host attributes dict.
:param whitelist: A 2 item list like ["location_type", ["location1", 'location2']]
:returns: boolean, True if the host gets past the whitelist
|
host_passes_whitelist
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_all_namespaces(
    soa_dir: str = DEFAULT_SOA_DIR,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    """Collect the smartstack namespaces of every service under soa_dir.

    Mostly exists so synapse can fetch everything it needs in one call.
    :param soa_dir: The SOA config directory to read from
    :returns: A list of tuples of the form (service.namespace, namespace_config)
    """
    root_dir = os.path.abspath(soa_dir)
    all_namespaces: List[Tuple[str, ServiceNamespaceConfig]] = []
    for service_dir in os.listdir(root_dir):
        all_namespaces += get_all_namespaces_for_service(service_dir, soa_dir)
    return all_namespaces
|
Get all the smartstack namespaces across all services.
This is mostly so synapse can get everything it needs in one call.
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of the form (service.namespace, namespace_config)
|
get_all_namespaces
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_all_namespaces_for_service(
    service: str, soa_dir: str = DEFAULT_SOA_DIR, full_name: bool = True
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    """List the smartstack namespaces defined for one service.

    :param service: The service name
    :param soa_dir: The SOA config directory to read from
    :param full_name: A boolean indicating if the service name should be prepended to the namespace in the
    returned tuples as described below (Default: True)
    :returns: A list of tuples of the form (service<SPACER>namespace, namespace_config) if full_name is true,
    otherwise of the form (namespace, namespace_config)
    """
    service_config = service_configuration_lib.read_service_configuration(
        service, soa_dir
    )
    smartstack = service_config.get("smartstack", {})
    return [
        (compose_job_id(service, namespace) if full_name else namespace, config)
        for namespace, config in smartstack.items()
    ]
|
Get all the smartstack namespaces listed for a given service name.
:param service: The service name
:param soa_dir: The SOA config directory to read from
:param full_name: A boolean indicating if the service name should be prepended to the namespace in the
returned tuples as described below (Default: True)
:returns: A list of tuples of the form (service<SPACER>namespace, namespace_config) if full_name is true,
otherwise of the form (namespace, namespace_config)
|
get_all_namespaces_for_service
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def get_expected_instance_count_for_namespace(
    service: str,
    namespace: str,
    instance_type_class: Type[LongRunningServiceConfig],
    cluster: Optional[str] = None,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> int:
    """Get the number of expected instances for a namespace, based on the number
    of instances set to run on that namespace as specified in service configuration files.

    :param service: The service's name
    :param namespace: The namespace for that service to check
    :param instance_type_class: The type of the instance, options are e.g. KubernetesDeploymentConfig
    :param cluster: The cluster to inspect; defaults to the local cluster
        from the system paasta config
    :param soa_dir: The SOA configuration directory to read from
    :returns: An integer value of the # of expected instances for the namespace
    """
    total_expected = 0
    if not cluster:
        # Fall back to the cluster this machine belongs to.
        cluster = load_system_paasta_config().get_cluster()
    pscl = PaastaServiceConfigLoader(
        service=service, soa_dir=soa_dir, load_deployments=False
    )
    for job_config in pscl.instance_configs(
        cluster=cluster, instance_type_class=instance_type_class
    ):
        # Only count instances registered under the namespace we care about.
        if f"{service}.{namespace}" in job_config.get_registrations():
            total_expected += job_config.get_instances()
    return total_expected
|
Get the number of expected instances for a namespace, based on the number
of instances set to run on that namespace as specified in service configuration files.
:param service: The service's name
:param namespace: The namespace for that service to check
instance_type_class: The type of the instance, options are e.g. KubernetesDeploymentConfig,
:param soa_dir: The SOA configuration directory to read from
:returns: An integer value of the # of expected instances for the namespace
|
get_expected_instance_count_for_namespace
|
python
|
Yelp/paasta
|
paasta_tools/long_running_service_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/long_running_service_tools.py
|
Apache-2.0
|
def reserve_unique_mac_address(lock_directory):
    """Pick and reserve a unique MAC address for a container.

    Random addresses (under MAC_ADDRESS_PREFIX) are tried until a lock file
    named after the address can be exclusively acquired, for up to 100
    attempts.
    :returns: (mac_address, lockfile), where the mac address is a string of
        the form 00:00:00:00:00:00 and lockfile is a file object holding an
        exclusive lock
    :raises MacAddressException: when no unique address could be reserved
    """
    for _attempt in range(100):
        suffix = "{:08x}".format(random.getrandbits(32))
        octets = MAC_ADDRESS_PREFIX + tuple(
            suffix[i : i + 2] for i in range(0, 8, 2)
        )
        candidate = ":".join(octets)
        lock_file = obtain_lock(os.path.join(lock_directory, candidate))
        if lock_file is not None:
            return (candidate, lock_file)
    raise MacAddressException("Unable to pick unique MAC address")
|
Pick and reserve a unique mac address for a container
returns (mac_address, lockfile)
where the mac address is a string in the form of 00:00:00:00:00:00
and lockfile is a file object that holds an exclusive lock
|
reserve_unique_mac_address
|
python
|
Yelp/paasta
|
paasta_tools/mac_address.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mac_address.py
|
Apache-2.0
|
def obtain_lock(lock_filepath):
    """Open *lock_filepath* and take an exclusive, non-blocking flock on it.

    :param lock_filepath: path of the lock file to open (created/truncated)
    :returns: the open file object holding the lock if successful, None if
        the lock is already held elsewhere
    :raises OSError: for flock failures other than "already locked"
    """
    lock_file = open(lock_filepath, "w")
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return lock_file
    except IOError as err:
        # Whatever the failure, we are not holding the lock: close the file
        # so the descriptor is not leaked (previously it leaked on errors
        # other than EAGAIN).
        lock_file.close()
        if err.errno != errno.EAGAIN:
            raise
        return None
|
Open and obtain a flock on the parameter. Returns a file if successful, None if not
|
obtain_lock
|
python
|
Yelp/paasta
|
paasta_tools/mac_address.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mac_address.py
|
Apache-2.0
|
def base_api(mesos_config_path: Optional[str] = None):
    """Helper for issuing raw API requests against the current Mesos leader.

    :returns: a function(method, endpoint, timeout=(3, 2), **kwargs) that
        performs the request and returns the response
    """
    leader = get_mesos_leader(mesos_config_path)

    def execute_request(method, endpoint, timeout=(3, 2), **kwargs):
        url = "http://%s:%d%s" % (leader, MESOS_MASTER_PORT, endpoint)
        session = Session()
        session.auth = (get_principal(), get_secret())
        prepared = session.prepare_request(Request(method, url, **kwargs))
        try:
            response = session.send(prepared, timeout=timeout)
            response.raise_for_status()
        except HTTPError:
            raise HTTPError("Error executing API request calling %s." % url)
        return response

    return execute_request
|
Helper function for making all API requests
:returns: a function that can be called to make a request
|
base_api
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def master_api(mesos_config_path: Optional[str] = None):
    """Helper for making API requests to the /master API endpoints.

    :returns: a function that can be called to make a request to /master
    """

    def execute_master_api_request(method, endpoint, **kwargs):
        return base_api(mesos_config_path=mesos_config_path)(
            method, "/master%s" % endpoint, **kwargs
        )

    return execute_master_api_request
|
Helper function for making API requests to the /master API endpoints
:returns: a function that can be called to make a request to /master
|
master_api
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def reserve_api():
    """Helper for making API requests to the /reserve API endpoints.

    :returns: a function that can be called to make a request to /reserve
    """

    def execute_reserve_api_request(method, endpoint, **kwargs):
        return master_api()(method, "/reserve%s" % endpoint, **kwargs)

    return execute_reserve_api_request
|
Helper function for making API requests to the /reserve API endpoints
:returns: a function that can be called to make a request to /reserve
|
reserve_api
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def unreserve_api():
    """Helper for making API requests to the /unreserve API endpoints.

    :returns: a function that can be called to make a request to /unreserve
    """

    def execute_unreserve_api_request(method, endpoint, **kwargs):
        return master_api()(method, "/unreserve%s" % endpoint, **kwargs)

    return execute_unreserve_api_request
|
Helper function for making API requests to the /unreserve API endpoints
:returns: a function that can be called to make a request to /unreserve
|
unreserve_api
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def maintenance_api():
    """Helper for making API requests to the /master/maintenance endpoints.

    Uses a longer read timeout than the default, since maintenance calls can
    be slow.
    :returns: a function that can be called to make a request to /master/maintenance
    """

    def execute_schedule_api_request(method, endpoint, **kwargs):
        return master_api()(
            method, "/maintenance%s" % endpoint, timeout=(3, 10), **kwargs
        )

    return execute_schedule_api_request
|
Helper function for making API requests to the /master/maintenance API endpoints
:returns: a function that can be called to make a request to /master/maintenance
|
maintenance_api
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_schedule_client():
    """Helper for making API requests to /master/maintenance/schedule.

    :returns: a function that can be called to make a request to /master/maintenance/schedule
    """

    def execute_schedule_api_request(method, endpoint, **kwargs):
        return maintenance_api()(method, "/schedule%s" % endpoint, **kwargs)

    return execute_schedule_api_request
|
Helper function for making API requests to the /master/maintenance/schedule API endpoints
:returns: a function that can be called to make a request to /master/maintenance/schedule
|
get_schedule_client
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_maintenance_schedule():
    """Issue a GET_MAINTENANCE_SCHEDULE call via the operator API.

    :returns: a GET_MAINTENANCE_SCHEDULE response
    """
    return operator_api()(data={"type": "GET_MAINTENANCE_SCHEDULE"})
|
Makes a GET_MAINTENANCE_SCHEDULE request to the operator api
:returns: a GET_MAINTENANCE_SCHEDULE response
|
get_maintenance_schedule
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_maintenance_status(mesos_config_path: Optional[str] = None):
    """Issue a GET_MAINTENANCE_STATUS call via the operator API.

    :returns: a GET_MAINTENANCE_STATUS response
    """
    return operator_api(mesos_config_path=mesos_config_path)(
        data={"type": "GET_MAINTENANCE_STATUS"}
    )
|
Makes a GET_MAINTENANCE_STATUS request to the operator api
:returns: a GET_MAINTENANCE_STATUS response
|
get_maintenance_status
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def schedule():
    """Fetch the Mesos maintenance schedule: hostname/ip mappings together
    with their maintenance windows.

    :returns: GET_MAINTENANCE_SCHEDULE response text
    :raises HTTPError: when the schedule request fails
    """
    try:
        response = get_maintenance_schedule()
    except HTTPError:
        raise HTTPError("Error getting maintenance schedule.")
    return response.text
|
Get the Mesos maintenance schedule. This contains hostname/ip mappings and their maintenance window.
:returns: GET_MAINTENANCE_SCHEDULE response text
|
schedule
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_hosts_with_state(
    state, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> List[str]:
    """Helper function to check the maintenance status and return all hosts
    listed as being in a current state.

    :param state: State we are interested in ('down_machines' or 'draining_machines')
    :param system_paasta_config: used to locate the mesos config path
    :returns: A list of hostnames in the specified state or an empty list if no machines
    :raises HTTPError: if the maintenance status request fails
    """
    mesos_config_path = get_mesos_config_path(system_paasta_config)
    try:
        status = get_maintenance_status(mesos_config_path).json()
        status = status["get_maintenance_status"]["status"]
    except HTTPError:
        raise HTTPError("Error getting maintenance status.")
    # Guard against both a missing state key and an empty machine list: the
    # previous code indexed status[state][0] and raised IndexError when the
    # list existed but was empty.
    machines = (status or {}).get(state) or []
    if not machines:
        return []
    # Depending on the API version, entries either nest the hostname under
    # an 'id' key or carry it at the top level.
    if "id" in machines[0]:
        return [machine["id"]["hostname"] for machine in machines]
    else:
        return [machine["hostname"] for machine in machines]
|
Helper function to check the maintenance status and return all hosts
listed as being in a current state
:param state: State we are interested in ('down_machines' or 'draining_machines')
:returns: A list of hostnames in the specified state or an empty list if no machines
|
get_hosts_with_state
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_draining_hosts(system_paasta_config: Optional[SystemPaastaConfig] = None):
    """Return the hostnames that are currently marked as draining.

    :param system_paasta_config: optional preloaded system config
    :returns: a list of hostname strings
    """
    return get_hosts_with_state(
        state="draining_machines",
        system_paasta_config=system_paasta_config,
    )
|
Returns a list of hostnames that are marked as draining
:returns: a list of strings representing hostnames
|
get_draining_hosts
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_hosts_forgotten_draining(grace=0):
    """Find hosts still marked as draining (rather than down) even though
    their maintenance window has already started.

    :param grace: nanoseconds of slack past the window start before a
        draining host counts as forgotten
    :returns: a list of hostnames of hosts forgotten draining
    """
    currently_draining = get_draining_hosts()
    log.debug("draining_hosts: %s" % currently_draining)
    past_start = get_hosts_past_maintenance_start(grace=grace)
    log.debug("hosts_past_maintenance_start: %s" % past_start)
    forgotten = list(set(currently_draining).intersection(past_start))
    log.debug("forgotten_draining: %s" % forgotten)
    return forgotten
|
Find hosts that are still marked as draining (rather than down) after the start
of their maintenance window.
:param grace: integer number of nanoseconds to allow a host to be left in the draining
state after the start of its maintenance window before we consider it forgotten.
:returns: a list of hostnames of hosts forgotten draining
|
get_hosts_forgotten_draining
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_hosts_forgotten_down(grace=0):
    """Find hosts still marked as down (rather than up) even though their
    maintenance window has already ended.

    :param grace: nanoseconds of slack past the window end before a down
        host counts as forgotten
    :returns: a list of hostnames of hosts forgotten down
    """
    currently_down = get_down_hosts()
    log.debug("down_hosts: %s" % currently_down)
    past_end = get_hosts_past_maintenance_end(grace=grace)
    log.debug("hosts_past_maintenance_end: %s" % past_end)
    forgotten = list(set(currently_down).intersection(past_end))
    log.debug("forgotten_down: %s" % forgotten)
    return forgotten
|
Find hosts that are still marked as down (rather than up) after the end
of their maintenance window.
:param grace: integer number of nanoseconds to allow a host to be left in the down
state after the end of its maintenance window before we consider it forgotten.
:returns: a list of hostnames of hosts forgotten down
|
get_hosts_forgotten_down
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def parse_timedelta(value):
    """Convert a human-readable duration into nanoseconds.

    :param value: a string in any time format supported by :mod:`pytimeparse`
    :returns: an integer (or float) delta in nanoseconds
    :raises argparse.ArgumentTypeError: if the value cannot be parsed
    """
    error_msg = "'%s' is not a valid time expression" % value
    try:
        seconds = timeparse.timeparse(value)
    except TypeError:
        seconds = None
    # NOTE: zero-length durations are rejected here too, since `not seconds`
    # is true for both None (parse failure) and 0.
    if seconds:
        return seconds_to_nanoseconds(seconds)
    raise argparse.ArgumentTypeError(error_msg)
|
Return the delta in nanoseconds.
:param value: a string containing a time format supported by :mod:`pytimeparse`
:returns: an integer (or float) representing the specified delta in nanoseconds
|
parse_timedelta
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def parse_datetime(value):
    """Convert a human-readable datetime into nanoseconds since the epoch.

    :param value: a string in any format supported by :mod:`dateutil.parser`
    :returns: an integer (or float) timestamp in nanoseconds
    :raises argparse.ArgumentTypeError: if the value cannot be parsed
    """
    error_msg = "'%s' is not a valid datetime expression" % value
    try:
        dt = parser.parse(value)
    except Exception:
        dt = None
    if dt:
        return datetime_to_nanoseconds(dt)
    raise argparse.ArgumentTypeError(error_msg)
|
Return the datetime in nanoseconds.
:param value: a string containing a datetime supported by :mod:`dateutil.parser`
:returns: an integer (or float) representing the specified datetime in nanoseconds
|
parse_datetime
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def build_maintenance_payload(hostnames, maint_type):
    """Build the JSON payload used to bring hosts up/down for maintenance.

    :param hostnames: a list of hostnames
    :param maint_type: the maintenance operation, e.g. 'start_maintenance'
        or 'stop_maintenance'
    :returns: a dict payload for the Mesos operator API
    """
    machines = {"machines": get_machine_ids(hostnames)}
    return {"type": maint_type.upper(), maint_type.lower(): machines}
|
Creates the JSON payload necessary to bring the specified hostnames up/down for maintenance.
:param hostnames: a list of hostnames
:returns: a dictionary representing the list of machines to bring up/down for maintenance
|
build_maintenance_payload
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def hostnames_to_components(hostnames, resolve=False):
    """Turn 'host[|ip]' strings into Hostname namedtuples with 'host' and
    'ip' attributes, optionally resolving missing IPs via DNS.

    :param hostnames: entries of the form 'host' or 'host|ip'
    :param resolve: if True, look up the IP for entries that do not carry one
    :returns: a list of Hostname namedtuples
    """
    components = []
    for entry in hostnames:
        # "hostname|ipaddress" lets callers skip the DNS lookup entirely.
        if "|" in entry:
            host, ip = entry.split("|")
        else:
            host = entry
            try:
                ip = gethostbyname(entry) if resolve else None
            except gaierror:
                log.error(f"Failed to resolve IP for {entry}, continuing regardless")
                continue
        components.append(Hostname(host=host, ip=ip))
    return components
|
Converts a list of 'host[|ip]' entries into namedtuples containing 'host' and 'ip' attributes,
optionally performing a DNS lookup to resolve the hostname into an IP address
:param hostnames: a list of hostnames where each hostname can be of the form 'host[|ip]'
:param resolve: boolean representing whether to lookup the IP address corresponding to the hostname via DNS
:returns: a namedtuple containing the hostname and IP components
|
hostnames_to_components
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_machine_ids(hostnames):
    """Convert hostnames into the hostname/ip machine-id dicts Mesos expects.

    :param hostnames: a list of hostnames
    :returns: a list of {'hostname': ..., 'ip': ...} dicts describing the
        machines to bring up/down for maintenance
    """
    return [
        {"hostname": component.host, "ip": component.ip}
        for component in hostnames_to_components(hostnames, resolve=True)
    ]
|
Helper function to convert a list of hostnames into a JSON list of hostname/ip pairs.
:param hostnames: a list of hostnames
:returns: a dictionary representing the list of machines to bring up/down for maintenance
|
get_machine_ids
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def build_reservation_payload(resources):
    """Build the JSON payload for dynamically (un)reserving mesos resources.

    :param resources: Resource namedtuples giving the name and amount of each
        resource to (un)reserve
    :returns: a list of SCALAR resource dicts for the Mesos operator API
    """
    return [
        {
            "name": resource.name,
            "type": "SCALAR",
            "scalar": {"value": resource.amount},
            "role": MAINTENANCE_ROLE,
            "reservation": {"principal": get_principal()},
        }
        for resource in resources
    ]
|
Creates the JSON payload needed to dynamically (un)reserve resources in mesos.
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: a dictionary that can be sent to Mesos to (un)reserve resources
|
build_reservation_payload
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def build_maintenance_schedule_payload(
    hostnames, start=None, duration=None, drain=True
):
    """Creates the JSON payload needed to (un)schedule maintenance on the specified hostnames.

    :param hostnames: a list of hostnames
    :param start: the time to start the maintenance, as nanoseconds since the epoch
    :param duration: length of the maintenance window, in nanoseconds
    :param drain: True to schedule a maintenance window for the hosts (drain),
        False to remove them from the schedule (undrain)
    :returns: a dictionary that can be sent to Mesos to (un)schedule maintenance
    """
    schedule = get_maintenance_schedule().json()["get_maintenance_schedule"]["schedule"]
    machine_ids = get_machine_ids(hostnames)
    if drain:
        window = {
            "machine_ids": machine_ids,
            "unavailability": {
                "start": {"nanoseconds": int(start)},
                "duration": {"nanoseconds": int(duration)},
            },
        }
    if schedule:
        # Strip the given hosts from any existing windows (when draining, a
        # fresh window for them is appended below).
        # Bug fix: the original removed entries from the very lists it was
        # iterating, which skips elements; iterate over copies / rebuild.
        for existing_window in list(schedule["windows"]):
            existing_window["machine_ids"] = [
                machine_id
                for machine_id in existing_window["machine_ids"]
                if machine_id not in machine_ids
            ]
            if not existing_window["machine_ids"]:
                schedule["windows"].remove(existing_window)
        if drain:
            windows = schedule["windows"] + [window]
        else:
            windows = schedule["windows"]
    elif drain:
        windows = [window]
    else:
        windows = []
    return {
        "type": "UPDATE_MAINTENANCE_SCHEDULE",
        "update_maintenance_schedule": {"schedule": {"windows": windows}},
    }
|
Creates the JSON payload needed to (un)schedule maintenance on the specified hostnames.
:param hostnames: a list of hostnames
:param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
:param duration: length of the maintenance window, represented as number of nanoseconds since the epoch
:param drain: boolean to note whether we are draining (True) the specified hosts or undraining (False) them
:returns: a dictionary that can be sent to Mesos to (un)schedule maintenance
|
build_maintenance_schedule_payload
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def load_credentials(mesos_secrets="/nail/etc/mesos-slave-secret"):
    """Loads the mesos-slave credentials from the specified file. These credentials will be used for all
    maintenance API requests.

    :param mesos_secrets: optional path to the JSON file containing the mesos-slave credentials
    :returns: a Credentials namedtuple of the form (file, principal, secret)
    :raises EnvironmentError: if the secrets file cannot be read (re-raised after logging)
    :raises KeyError: if the file lacks the 'principal' or 'secret' keys (re-raised after logging)
    """
    try:
        with open(mesos_secrets) as data_file:
            data = json.load(data_file)
    except EnvironmentError:
        log.error(
            "maintenance calls must be run on a Mesos slave containing valid credentials (%s)"
            % mesos_secrets
        )
        raise
    try:
        username = data["principal"]
        password = data["secret"]
    except KeyError:
        log.error(
            "%s does not contain Mesos slave credentials in the expected format. "
            "See http://mesos.apache.org/documentation/latest/authentication/ for details"
            % mesos_secrets
        )
        raise
    return Credentials(file=mesos_secrets, principal=username, secret=password)
|
Loads the mesos-slave credentials from the specified file. These credentials will be used for all
maintenance API requests.
:param mesos_secrets: optional argument specifying the path to the file containing the mesos-slave credentials
:returns: a tuple of the form (username, password)
|
load_credentials
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def reserve(slave_id, resources):
    """Dynamically reserve resources in mesos to prevent tasks from using them.

    :param slave_id: the id of the mesos slave
    :param resources: list of Resource named tuples specifying the name and
        amount of the resource to reserve
    :returns: the operator API response text
    :raises HTTPError: if the reservation request fails
    """
    log.info(f"Dynamically reserving resources on {slave_id}: {resources}")
    payload = _make_operator_reservation_request_payload(
        slave_id=slave_id,
        payload=build_reservation_payload(resources),
        request_type="reserve_resources",
    )
    client_fn = operator_api()
    try:
        # Bug fix: removed a stray debug `print(payload)` that leaked the
        # request payload to stdout on every call.
        reserve_output = client_fn(data=payload).text
    except HTTPError:
        raise HTTPError("Error adding dynamic reservation.")
    return reserve_output
|
Dynamically reserve resources in mesos to prevent tasks from using them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: boolean where 0 represents success and 1 is a failure
|
reserve
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def unreserve(slave_id, resources):
    """Dynamically unreserve resources in mesos so that tasks may use them again.

    :param slave_id: the id of the mesos slave
    :param resources: list of Resource named tuples specifying the name and
        amount of the resource to unreserve
    :returns: the operator API response text
    :raises HTTPError: if the unreservation request fails
    """
    log.info(f"Dynamically unreserving resources on {slave_id}: {resources}")
    request = _make_operator_reservation_request_payload(
        slave_id=slave_id,
        payload=build_reservation_payload(resources),
        request_type="unreserve_resources",
    )
    client_fn = operator_api()
    try:
        return client_fn(data=request).text
    except HTTPError:
        raise HTTPError("Error adding dynamic unreservation.")
|
Dynamically unreserve resources in mesos to allow tasks to using them.
:param slave_id: the id of the mesos slave
:param resources: list of Resource named tuples specifying the name and amount of the resource to (un)reserve
:returns: boolean where 0 represents success and 1 is a failure
|
unreserve
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def components_to_hosts(components):
    """Extract the host field from each Component namedtuple.

    :param components: an iterable of namedtuples with a ``host`` attribute
    :returns: a list of the hosts, in the original order
    """
    return [component.host for component in components]
|
Convert a list of Component namedtuples to a list of their hosts
:param components: a list of Component namedtuples
:returns: list of the hosts associated with each Component
|
components_to_hosts
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def reserve_all_resources(hostnames):
    """Dynamically reserve all currently-free resources on the given hosts.

    :param hostnames: list of hostnames to reserve resources on
    :raises HTTPError: if any reservation fails
    """
    mesos_state = a_sync.block(get_mesos_master().state_summary)
    hosts = components_to_hosts(hostnames_to_components(hostnames))
    for slave in mesos_state["slaves"]:
        if slave["hostname"] not in hosts:
            continue
        hostname = slave["hostname"]
        log.info("Reserving all resources on %s" % hostname)
        slave_id = slave["id"]
        resources = []
        for name in ("disk", "mem", "cpus", "gpus"):
            # Free = total, minus what's in use, minus what any role has
            # already reserved.
            free = slave["resources"][name] - slave["used_resources"][name]
            for role in slave["reserved_resources"]:
                free -= slave["reserved_resources"][role][name]
            resources.append(Resource(name=name, amount=free))
        try:
            reserve(slave_id=slave_id, resources=resources)
        except HTTPError:
            raise HTTPError(
                f"Failed reserving all of the resources on {hostname} ({slave_id}). Aborting."
            )
|
Dynamically reserve all available resources on the specified hosts
:param hostnames: list of hostnames to reserve resources on
|
reserve_all_resources
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def unreserve_all_resources(hostnames):
    """Dynamically unreserve the maintenance-role reservations on the given hosts.

    :param hostnames: list of hostnames to unreserve resources on
    :raises HTTPError: if any unreservation fails
    """
    mesos_state = a_sync.block(get_mesos_master().state_summary)
    hosts = components_to_hosts(hostnames_to_components(hostnames))
    for slave in mesos_state["slaves"]:
        if slave["hostname"] not in hosts:
            continue
        hostname = slave["hostname"]
        log.info("Unreserving all resources on %s" % hostname)
        slave_id = slave["id"]
        resources = []
        # Only reservations made under our maintenance role are released;
        # the unreserve call is still issued even when there are none.
        if MAINTENANCE_ROLE in slave["reserved_resources"]:
            role_reservations = slave["reserved_resources"][MAINTENANCE_ROLE]
            resources = [
                Resource(name=name, amount=role_reservations[name])
                for name in ("disk", "mem", "cpus", "gpus")
            ]
        try:
            unreserve(slave_id=slave_id, resources=resources)
        except HTTPError:
            raise HTTPError(
                f"Failed unreserving all of the resources on {hostname} ({slave_id}). Aborting."
            )
|
Dynamically unreserve all available resources on the specified hosts
:param hostnames: list of hostnames to unreserve resources on
|
unreserve_all_resources
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def drain(hostnames, start, duration, reserve_resources=True):
    """Schedule a maintenance window for the given hosts and mark them as draining.

    :param hostnames: a list of hostnames
    :param start: maintenance start time, as nanoseconds since the epoch
    :param duration: length of the maintenance window, in nanoseconds
    :param reserve_resources: also reserve the hosts' free resources before draining
    :returns: the operator API response text
    :raises HTTPError: if the drain request fails
    """
    log.info("Draining: %s" % hostnames)
    if reserve_resources:
        try:
            reserve_all_resources(hostnames)
        except HTTPError as e:
            log.warning("Failed to reserve resources, will continue to drain: %s" % e)
    payload = build_maintenance_schedule_payload(hostnames, start, duration, drain=True)
    client_fn = operator_api()
    try:
        return client_fn(data=payload).text
    except HTTPError:
        raise HTTPError("Error performing maintenance drain.")
|
Schedules a maintenance window for the specified hosts and marks them as draining.
:param hostnames: a list of hostnames
:param start: the time to start the maintenance, represented as number of nanoseconds since the epoch
:param duration: length of the maintenance window, represented as number of nanoseconds since the epoch
:param reserve_resources: bool setting to also reserve the free resources on the agent before the drain call
:returns: None
|
drain
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def undrain(hostnames, unreserve_resources=True):
    """Unschedule the maintenance window for the given hosts and unmark them as
    draining, making them ready for regular use again.

    :param hostnames: a list of hostnames
    :param unreserve_resources: also release this tool's resource reservations
        on the hosts before undraining
    :returns: the operator API response text
    :raises HTTPError: if the undrain request fails
    """
    log.info("Undraining: %s" % hostnames)
    if unreserve_resources:
        try:
            unreserve_all_resources(hostnames)
        except HTTPError as e:
            log.warning(
                "Failed to unreserve resources, will continue to undrain: %s" % e
            )
    payload = build_maintenance_schedule_payload(hostnames, drain=False)
    # Bug fix: a leftover `client_fn = get_schedule_client()` call was executed
    # and immediately overwritten; only the operator API client is needed.
    client_fn = operator_api()
    try:
        undrain_output = client_fn(data=payload).text
    except HTTPError:
        raise HTTPError("Error performing maintenance undrain.")
    return undrain_output
|
Unschedules the maintenance window for the specified hosts and unmarks them as draining. They are ready for
regular use.
:param hostnames: a list of hostnames
:param unreserve_resources: bool setting to also unreserve resources on the agent before the undrain call
:returns: None
|
undrain
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def down(hostnames):
    """Mark the given hostnames as down for maintenance, making them
    unavailable for use.

    :param hostnames: a list of hostnames
    :returns: the operator API response text
    :raises HTTPError: if the request fails
    """
    log.info("Bringing down: %s" % hostnames)
    payload = build_maintenance_payload(hostnames, "start_maintenance")
    client_fn = operator_api()
    try:
        return client_fn(data=payload).text
    except HTTPError:
        raise HTTPError("Error performing maintenance down.")
|
Marks the specified hostnames as being down for maintenance, and makes them unavailable for use.
:param hostnames: a list of hostnames
:returns: None
|
down
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def up(hostnames):
    """Mark the given hostnames as no longer down for maintenance, making
    them available for use again.

    :param hostnames: a list of hostnames
    :returns: the operator API response text
    :raises HTTPError: if the request fails
    """
    log.info("Bringing up: %s" % hostnames)
    payload = build_maintenance_payload(hostnames, "stop_maintenance")
    client_fn = operator_api()
    try:
        return client_fn(data=payload).text
    except HTTPError:
        raise HTTPError("Error performing maintenance up.")
|
Marks the specified hostnames as no longer being down for maintenance, and makes them available for use.
:param hostnames: a list of hostnames
:returns: None
|
up
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def raw_status():
    """Fetch the raw Mesos maintenance status: hostname/ip mappings for hosts
    that are either down for maintenance or draining.

    :returns: the Response object containing the status
    :raises HTTPError: if the status cannot be fetched
    """
    try:
        return get_maintenance_status()
    except HTTPError:
        raise HTTPError("Error performing maintenance status.")
|
Get the Mesos maintenance status. This contains hostname/ip mappings for hosts that are either marked as being
down for maintenance or draining.
:returns: Response Object containing status
|
raw_status
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def friendly_status():
    """Render the Mesos maintenance status in a human-friendly way.

    :returns: text with one line per draining/down machine
    """
    status = raw_status().json()["get_maintenance_status"]["status"]
    lines = []
    for machine in status.get("draining_machines", []):
        lines.append(
            "{} ({}): Draining\n".format(
                machine["id"]["hostname"], machine["id"]["ip"]
            )
        )
    for machine in status.get("down_machines", []):
        lines.append("{} ({}): Down\n".format(machine["hostname"], machine["ip"]))
    return "".join(lines)
|
Display the Mesos maintenance status in a human-friendly way.
:returns: Text representation of the human-friendly status
|
friendly_status
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def is_host_drained(hostname):
    """Check whether a host has fully drained: it must be marked as draining
    and currently be running zero tasks.

    :param hostname: hostname to check
    :returns: True or False
    """
    if not is_host_draining(hostname=hostname):
        return False
    return get_count_running_tasks_on_slave(hostname) == 0
|
Checks if a host has drained successfully by confirming it is
draining and currently running 0 tasks
:param hostname: hostname to check
:returns: True or False
|
is_host_drained
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_hosts_past_maintenance_start(grace=0):
    """List hosts whose maintenance window has already started.

    :param grace: nanoseconds of slack past the window start before a host is
        considered past its maintenance start
    :returns: list of hostnames
    """
    schedules = get_maintenance_schedule().json()["get_maintenance_schedule"][
        "schedule"
    ]
    cutoff = datetime_to_nanoseconds(now()) - grace
    hosts = []
    if "windows" in schedules:
        for window in schedules["windows"]:
            if window["unavailability"]["start"]["nanoseconds"] < cutoff:
                hosts.extend(machine["hostname"] for machine in window["machine_ids"])
    log.debug(f"Hosts past maintenance start: {hosts}")
    return hosts
|
Get a list of hosts that have reached the start of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the draining
state after the start of its maintenance window before we consider it past its maintenance start
:returns: List of hostnames
|
get_hosts_past_maintenance_start
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_hosts_past_maintenance_end(grace=0):
    """List hosts whose maintenance window has already ended.

    :param grace: nanoseconds of slack past the window end before a host is
        considered past its maintenance end
    :returns: list of hostnames
    """
    schedules = get_maintenance_schedule().json()["get_maintenance_schedule"][
        "schedule"
    ]
    cutoff = datetime_to_nanoseconds(now()) - grace
    hosts = []
    if "windows" in schedules:
        for window in schedules["windows"]:
            unavailability = window["unavailability"]
            window_end = (
                unavailability["start"]["nanoseconds"]
                + unavailability["duration"]["nanoseconds"]
            )
            if window_end < cutoff:
                hosts.extend(machine["hostname"] for machine in window["machine_ids"])
    log.debug(f"Hosts past maintenance end: {hosts}")
    return hosts
|
Get a list of hosts that have reached the end of their maintenance window
:param grace: integer number of nanoseconds to allow a host to be left in the down
state after the end of its maintenance window before we consider it past its maintenance end
:returns: List of hostnames
|
get_hosts_past_maintenance_end
|
python
|
Yelp/paasta
|
paasta_tools/mesos_maintenance.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_maintenance.py
|
Apache-2.0
|
def get_mesos_config_path(
    system_paasta_config: Optional[SystemPaastaConfig] = None,
) -> str:
    """Locate the configuration file for mesos-cli.

    :param system_paasta_config: optional preloaded system config; loaded on
        demand when omitted
    :returns: the configured path, or the default location if none is set
    """
    if system_paasta_config is None:
        system_paasta_config = load_system_paasta_config()
    mesos_cli_config = system_paasta_config.get_mesos_cli_config()
    return mesos_cli_config.get("path", DEFAULT_MESOS_CLI_CONFIG_LOCATION)
|
Determine where to find the configuration for mesos-cli.
|
get_mesos_config_path
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_mesos_leader(mesos_config_path: Optional[str] = None) -> str:
    """Get the current mesos-master leader's hostname.

    Attempts to determine this by using mesos.cli to query ZooKeeper.

    :returns: the leader's fully-qualified domain name
    :raises MasterNotAvailableException: if mesos.cli cannot provide the master
    :raises ValueError: if the master URL has no parseable hostname
    """
    try:
        url = get_mesos_master(mesos_config_path).host
    except mesos_exceptions.MasterNotAvailableException:
        log.debug("mesos.cli failed to provide the master host")
        raise
    log.debug("mesos.cli thinks the master host is: %s" % url)
    hostname = urlparse(url).hostname
    log.debug("The parsed master hostname is: %s" % hostname)
    # Values such as 'localhost:5050' parse without a hostname attribute,
    # so guard before the DNS round-trip.
    if not hostname:
        raise ValueError("Expected to receive a valid URL, got: %s" % url)
    try:
        host = socket.gethostbyaddr(hostname)[0]
        fqdn = socket.getfqdn(host)
    except (socket.error, socket.herror, socket.gaierror, socket.timeout):
        log.debug("Failed to convert mesos leader hostname to fqdn!")
        raise
    log.debug("Mesos Leader: %s" % fqdn)
    return fqdn
|
Get the current mesos-master leader's hostname.
Attempts to determine this by using mesos.cli to query ZooKeeper.
:returns: The current mesos-master hostname
|
get_mesos_leader
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def find_mesos_leader(cluster):
    """Find the leader with redirect given one mesos master."""
    fqdn_format = load_system_paasta_config().get_cluster_fqdn_format()
    master = fqdn_format.format(cluster=cluster)
    if master is None:
        raise ValueError("Mesos master is required to find leader")
    redirect_url = f"http://{master}:{MESOS_MASTER_PORT}/redirect"
    try:
        # Timeouts here are (connect, read).
        response = requests.get(redirect_url, timeout=(5, 30))
    except Exception as e:
        raise MesosLeaderUnavailable(e)
    leader_host = urlparse(response.url).hostname
    return f"{leader_host}:{MESOS_MASTER_PORT}"
|
Find the leader with redirect given one mesos master.
|
find_mesos_leader
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_current_tasks(job_id: str) -> List[Task]:
    """Return every task (active or not) matching the given job id.

    :param job_id: the job id used to filter tasks
    :return: a list of mesos.cli.Task objects
    """
    master = get_mesos_master()
    return await master.tasks(fltr=job_id, active_only=False)
|
Returns a list of all the tasks with a given job id.
:param job_id: the job id of the tasks.
:return tasks: a list of mesos.cli.Task.
|
get_current_tasks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_running_tasks_from_frameworks(job_id=""):
    """Return running tasks from active and completed frameworks,
    but NOT orphaned tasks.
    """
    framework_tasks = await get_current_tasks(job_id)
    return filter_running_tasks(framework_tasks)
|
Will include tasks from active and completed frameworks
but NOT orphaned tasks
|
get_running_tasks_from_frameworks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_all_running_tasks() -> Collection[Task]:
    """Return all currently running tasks, including orphan tasks.

    NOTE(review): the previous docstring claimed orphans were excluded, but
    ``mesos_master.orphan_tasks()`` results are explicitly appended below.
    """
    framework_tasks = await get_current_tasks("")
    mesos_master = get_mesos_master()
    framework_tasks += await mesos_master.orphan_tasks()
    running_tasks = filter_running_tasks(framework_tasks)
    return running_tasks
|
Will include all running tasks; for now orphans are not included
|
get_all_running_tasks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_cached_list_of_all_current_tasks():
    """Returns a cached list of all mesos tasks.
    This function is used by 'paasta status' and 'paasta_serviceinit status'
    to avoid re-querying mesos master and re-parsing json to get mesos.Task objects.
    The async_ttl_cache decorator caches the list for 600 seconds.
    ttl doesn't really matter for this function because when we run 'paasta status'
    the corresponding HTTP request to mesos master is cached by requests_cache.
    :return tasks: a list of mesos.Task
    """
    # NOTE(review): the async_ttl_cache decorator referenced in the docstring is
    # not visible in this excerpt — confirm it is still applied at the definition.
    return await get_current_tasks("")
|
Returns a cached list of all mesos tasks.
This function is used by 'paasta status' and 'paasta_serviceinit status'
to avoid re-querying mesos master and re-parsing json to get mesos.Task objects.
The async_ttl_cache decorator caches the list for 600 seconds.
ttl doesn't really matter for this function because when we run 'paasta status'
the corresponding HTTP request to mesos master is cached by requests_cache.
:return tasks: a list of mesos.Task
|
get_cached_list_of_all_current_tasks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_cached_list_of_running_tasks_from_frameworks():
    """Returns a cached list of all running mesos tasks.
    See the docstring for get_cached_list_of_all_current_tasks().
    :return tasks: a list of mesos.Task
    """
    # Materialize directly with list(); the previous identity comprehension
    # ([task for task in ...]) added a redundant pass over the filtered tasks.
    return list(filter_running_tasks(await get_cached_list_of_all_current_tasks()))
|
Returns a cached list of all running mesos tasks.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task
|
get_cached_list_of_running_tasks_from_frameworks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_cached_list_of_not_running_tasks_from_frameworks():
    """Returns a cached list of mesos tasks that are NOT running.
    See the docstring for get_cached_list_of_all_current_tasks().
    :return tasks: a list of mesos.Task"""
    # Materialize directly with list(); the previous identity comprehension
    # ([task for task in ...]) added a redundant pass over the filtered tasks.
    return list(
        filter_not_running_tasks(await get_cached_list_of_all_current_tasks())
    )
|
Returns a cached list of mesos tasks that are NOT running.
See the docstring for get_cached_list_of_all_current_tasks().
:return tasks: a list of mesos.Task
|
get_cached_list_of_not_running_tasks_from_frameworks
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_first_status_timestamp_string(task: Task) -> str:
    """Render a task's first status timestamp as local time plus a humanized
    age, e.g. ``2015-01-30T08:45 (an hour ago)``.

    Returns ``"Unknown"`` when the task has no first status timestamp.
    """
    timestamp = get_first_status_timestamp(task)
    if timestamp is None:
        return "Unknown"
    when = datetime.datetime.fromtimestamp(timestamp)
    return "{} ({})".format(
        when.strftime("%Y-%m-%dT%H:%M"), humanize.naturaltime(when)
    )
|
Gets the first status timestamp from a task id and returns a human
readable string with the local time and a humanized duration:
``2015-01-30T08:45 (an hour ago)``
|
get_first_status_timestamp_string
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_cpu_usage(task: Task) -> str:
    """Calculates a metric of used_cpu/allocated_cpu
    To do this, we take the total number of cpu-seconds the task has consumed,
    (the sum of system and user time), OVER the total cpu time the task
    has been allocated.
    The total time a task has been allocated is the total time the task has
    been running (https://github.com/mesosphere/mesos/blob/0b092b1b0/src/webui/master/static/js/controllers.js#L140)
    multiplied by the "shares" a task has.

    :param task: the mesos task to inspect
    :returns: a percentage string (red when above 90%), "Undef" when no cpu
        time has been allocated yet, "None" on missing task/slave data, or
        "Timed Out" if fetching stats timed out
    """
    try:
        # Allocated cpu-seconds = wall-clock age of the task * its cpu shares.
        start_time = round(task["statuses"][0]["timestamp"])
        current_time = int(datetime.datetime.now().strftime("%s"))
        duration_seconds = current_time - start_time
        cpu_shares = await get_cpu_shares(task)
        allocated_seconds = duration_seconds * cpu_shares
        # Used cpu-seconds = system + user time as reported by the slave.
        task_stats = await task.stats()
        used_seconds = task_stats.get("cpus_system_time_secs", 0.0) + task_stats.get(
            "cpus_user_time_secs", 0.0
        )
        # Freshly-started tasks (or zero shares) have no allocation yet.
        if allocated_seconds == 0:
            return "Undef"
        percent = round(100 * (used_seconds / allocated_seconds), 1)
        percent_string = "%s%%" % percent
        # Highlight tasks consuming more than 90% of their allocation.
        if percent > 90:
            return PaastaColors.red(percent_string)
        else:
            return percent_string
    except (AttributeError, SlaveDoesNotExist):
        # Task or slave data is missing; report a placeholder rather than crash.
        return "None"
    except TimeoutError:
        return "Timed Out"
|
Calculates a metric of used_cpu/allocated_cpu
To do this, we take the total number of cpu-seconds the task has consumed,
(the sum of system and user time), OVER the total cpu time the task
has been allocated.
The total time a task has been allocated is the total time the task has
been running (https://github.com/mesosphere/mesos/blob/0b092b1b0/src/webui/master/static/js/controllers.js#L140)
multiplied by the "shares" a task has.
|
get_cpu_usage
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def format_running_mesos_task_row(
    task: Task, get_short_task_id: Callable[[str], str]
) -> Tuple[str, ...]:
    """Returns a pretty formatted string of a running mesos task attributes

    :param task: the running mesos task to render
    :param get_short_task_id: shortens a task id for display
    :returns: a tuple (short id, hostname, mem usage, cpu usage, start time)
    """
    short_task_id = get_short_task_id(task["id"])
    # Launch the three slave lookups concurrently; results_or_unknown turns
    # failures into a placeholder string instead of raising.
    short_hostname_future = asyncio.ensure_future(
        results_or_unknown(get_short_hostname_from_task(task))
    )
    mem_usage_future = asyncio.ensure_future(results_or_unknown(get_mem_usage(task)))
    cpu_usage_future = asyncio.ensure_future(results_or_unknown(get_cpu_usage(task)))
    first_status_timestamp = get_first_status_timestamp_string(task)
    # Wait for all three futures before reading their results.
    await asyncio.wait([short_hostname_future, mem_usage_future, cpu_usage_future])
    return (
        short_task_id,
        short_hostname_future.result(),
        mem_usage_future.result(),
        cpu_usage_future.result(),
        first_status_timestamp,
    )
|
Returns a pretty formatted string of a running mesos task attributes
|
format_running_mesos_task_row
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def zip_tasks_verbose_output(table, stdstreams):
    """Interleave each formatted task row with its tail of stdout/stderr lines.

    :param table: a formatted list of tasks (one string per task)
    :param stdstreams: for each task, a list of lines from stdout/stderr tail
    :returns: a flat list alternating each table row with its stream lines
    :raises ValueError: if the two lists differ in length
    """
    if len(table) != len(stdstreams):
        raise ValueError("Can only zip same-length lists")
    output = []
    # zip() instead of indexing; extend the stream lines directly rather than
    # through a redundant identity comprehension.
    for row, stream_lines in zip(table, stdstreams):
        output.append(row)
        output.extend(stream_lines)
    return output
|
Zip a list of strings (table) with a list of lists (stdstreams)
:param table: a formatted list of tasks
:param stdstreams: for each task, a list of lines from stdout/stderr tail
|
zip_tasks_verbose_output
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def format_task_list(
    tasks: Sequence[Task],
    list_title: str,
    table_header: Sequence[str],
    get_short_task_id: Callable[[str], str],
    format_task_row: Callable[
        [Task, Callable[[str], str]], Awaitable[Union[Sequence[str], str]]
    ],
    grey: bool,
    tail_lines: int,
) -> List[str]:
    """Formats a list of tasks, returns a list of output lines
    :param tasks: List of tasks as returned by get_*_tasks_from_all_frameworks.
    :param list_title: 'Running Tasks:' or 'Non-Running Tasks'.
    :param table_header: List of column names used in the tasks table.
    :param get_short_task_id: A function which given a task_id returns a short task_id suitable for printing.
    :param format_task_row: Formatting function, works on a task and a get_short_task_id function.
    :param tail_lines (int): number of lines of stdout/stderr to tail, as obtained from the Mesos sandbox.
    :param grey: If True, the list will be made less visually prominent.
    :return output: Formatted output (list of output lines).
    """
    # Pick the row colorizer once up front so the rest of the function is
    # agnostic to whether this list should be de-emphasized.
    if not grey:
        def colorize(x):
            return x
    else:
        def colorize(x):
            return PaastaColors.grey(x)
    output = []
    output.append(colorize(" %s" % list_title))
    # First table row is the (colorized) header.
    table_rows: List[Union[str, Sequence[str]]] = [
        [colorize(th) for th in table_header]
    ]
    if tasks:
        # Format all rows concurrently; each row may perform slave lookups.
        task_row_futures = [
            asyncio.ensure_future(format_task_row(task, get_short_task_id))
            for task in tasks
        ]
        await asyncio.wait(task_row_futures)
        for future in task_row_futures:
            table_rows.append(future.result())
    tasks_table = [" %s" % row for row in format_table(table_rows)]
    if tail_lines == 0:
        output.extend(tasks_table)
    else:
        # Fetch each task's stdout/stderr tail sequentially, then interleave
        # the tails with their corresponding table rows.
        stdstreams = []
        for task in tasks:
            stdstreams.append(
                await format_stdstreams_tail_for_task(
                    task, get_short_task_id, nlines=tail_lines
                )
            )
        output.append(tasks_table[0])  # header
        output.extend(zip_tasks_verbose_output(tasks_table[1:], stdstreams))
    return output
|
Formats a list of tasks, returns a list of output lines
:param tasks: List of tasks as returned by get_*_tasks_from_all_frameworks.
:param list_title: 'Running Tasks:' or 'Non-Running Tasks'.
:param table_header: List of column names used in the tasks table.
:param get_short_task_id: A function which given a task_id returns a short task_id suitable for printing.
:param format_task_row: Formatting function, works on a task and a get_short_task_id function.
:param tail_lines (int): number of lines of stdout/stderr to tail, as obtained from the Mesos sandbox.
:param grey: If True, the list will be made less visually prominent.
:return output: Formatted output (list of output lines).
|
format_task_list
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def status_mesos_tasks_verbose(
    filter_string: str, get_short_task_id: Callable[[str], str], tail_lines: int = 0
) -> str:
    """Returns detailed information about the mesos tasks for a service.
    :param filter_string: An id used for looking up Mesos tasks
    :param get_short_task_id: A function which given a
    task_id returns a short task_id suitable for
    printing.
    :param tail_lines: int representing the number of lines of stdout/err to
    report.
    """
    output: List[str] = []
    # Running tasks section.
    running_and_active_tasks = select_tasks_by_id(
        await get_cached_list_of_running_tasks_from_frameworks(), filter_string
    )
    list_title = "Running Tasks:"
    table_header = [
        "Mesos Task ID",
        "Host deployed to",
        "Ram",
        "CPU",
        "Deployed at what localtime",
    ]
    output.extend(
        await format_task_list(
            tasks=running_and_active_tasks,
            list_title=list_title,
            table_header=table_header,
            get_short_task_id=get_short_task_id,
            format_task_row=format_running_mesos_task_row,
            grey=False,
            tail_lines=tail_lines,
        )
    )
    # Non-running tasks section (greyed out, capped at the 10 most recent).
    non_running_tasks = select_tasks_by_id(
        await get_cached_list_of_not_running_tasks_from_frameworks(), filter_string
    )
    # Order the tasks by timestamp
    # NOTE(review): this sorts on the *formatted* timestamp string; the
    # humanized suffix ("(an hour ago)") and "Unknown" entries do not order
    # chronologically — consider sorting on the raw timestamp instead.
    non_running_tasks.sort(key=lambda task: get_first_status_timestamp_string(task))
    non_running_tasks_ordered = list(reversed(non_running_tasks[-10:]))
    list_title = "Non-Running Tasks"
    table_header = [
        "Mesos Task ID",
        "Host deployed to",
        "Deployed at what localtime",
        "Status",
    ]
    output.extend(
        await format_task_list(
            tasks=non_running_tasks_ordered,
            list_title=list_title,
            table_header=table_header,
            get_short_task_id=get_short_task_id,
            format_task_row=format_non_running_mesos_task_row,
            grey=True,
            tail_lines=tail_lines,
        )
    )
    return "\n".join(output)
|
Returns detailed information about the mesos tasks for a service.
:param filter_string: An id used for looking up Mesos tasks
:param get_short_task_id: A function which given a
task_id returns a short task_id suitable for
printing.
:param tail_lines: int representing the number of lines of stdout/err to
report.
|
status_mesos_tasks_verbose
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_local_slave_state(hostname=None):
    """Fetch a mesos slave's state document and return it as a dict.

    :param hostname: the host to query; defaults to the local machine.
    :raises MesosSlaveConnectionError: when the slave cannot be reached.
    """
    if hostname is None:
        hostname = socket.getfqdn()
    state_url = f"http://{hostname}:{MESOS_SLAVE_PORT}/state"
    try:
        headers = {"User-Agent": get_user_agent()}
        response = requests.get(state_url, timeout=10, headers=headers)
        if response.status_code == 404:
            # Older mesos versions only expose the legacy ".json" endpoint.
            fallback_url = f"http://{hostname}:{MESOS_SLAVE_PORT}/state.json"
            response = requests.get(fallback_url, timeout=10, headers=headers)
    except requests.ConnectionError as e:
        raise MesosSlaveConnectionError(
            "Could not connect to the mesos slave to see which services are running\n"
            "on %s. Is the mesos-slave running?\n"
            "Error was: %s\n" % (e.request.url, str(e))
        )
    response.raise_for_status()
    return json.loads(response.text)
|
Fetches mesos slave state and returns it as a dict.
:param hostname: The host from which to fetch slave state. If not specified, defaults to the local machine.
|
get_local_slave_state
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_all_tasks_from_state(
    mesos_state: MesosState, include_orphans: bool = False
) -> Sequence[MesosTask]:
    """Collect the tasks of every framework in a mesos state dump.

    :param mesos_state: the mesos_state
    :param include_orphans: also include tasks not owned by any framework
    :returns: a list of tasks
    """
    all_tasks = []
    for framework in mesos_state.get("frameworks", []):
        all_tasks.extend(framework.get("tasks", []))
    if include_orphans:
        all_tasks.extend(mesos_state.get("orphan_tasks", []))
    return all_tasks
|
Given a mesos state, find the tasks from all frameworks.
:param mesos_state: the mesos_state
:returns: a list of tasks
|
get_all_tasks_from_state
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_zookeeper_config(state):
    """Extract the zookeeper hosts and path from a mesos state dump.

    :param state: mesos state dictionary; ``state["flags"]["zk"]`` must look
        like ``zk://host1:2181,host2:2181/mesos``
    :returns: dict with ``hosts`` and ``path`` keys
    :raises ValueError: if the zk flag does not match the expected format
    """
    zk_flag = state["flags"]["zk"]
    re_zk = re.match(r"^zk://([^/]*)/(.*)$", zk_flag)
    if re_zk is None:
        # Previously a malformed flag crashed with an opaque AttributeError
        # (calling .group on None); fail with an explicit message instead.
        raise ValueError(f"Malformed zk flag in mesos state: {zk_flag!r}")
    return {"hosts": re_zk.group(1), "path": re_zk.group(2)}
|
Returns dict, containing the zookeeper hosts and path.
:param state: mesos state dictionary
|
get_zookeeper_config
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_number_of_mesos_masters(host, path):
    """Count mesos masters registered in zookeeper.

    Masters register themselves by creating ``info_`` (or ``json.info_``)
    nodes under *path*; counting those nodes yields the number of masters.

    :param host: zookeeper host string
    :param path: zookeeper path under which masters register
    :returns: the number of registered masters
    """
    zk = KazooClient(hosts=host, read_only=True)
    zk.start()
    try:
        children = zk.get_children(path)
        master_entries = [
            child
            for child in children
            if child.startswith(("json.info_", "info_"))
        ]
        return len(master_entries)
    finally:
        zk.stop()
        zk.close()
|
Returns an array, containing mesos masters
:param zk_config: dict containing information about zookeeper config.
Masters register themselves in zookeeper by creating ``info_`` entries.
We count these entries to get the number of masters.
|
get_number_of_mesos_masters
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_all_slaves_for_blacklist_whitelist(
    blacklist: DeployBlacklist, whitelist: DeployWhitelist
):
    """Fetch all mesos slaves and keep only those acceptable under the given
    blacklist and whitelist attribute filters.

    :param blacklist: a blacklist, used to filter mesos slaves by attribute
    :param whitelist: a whitelist, used to filter mesos slaves by attribute
    :returns: a list of acceptable mesos slave objects
    """
    return filter_mesos_slaves_by_blacklist(get_slaves(), blacklist, whitelist)
|
A wrapper function to get all slaves and filter according to
provided blacklist and whitelist.
:param blacklist: a blacklist, used to filter mesos slaves by attribute
:param whitelist: a whitelist, used to filter mesos slaves by attribute
:returns: a list of mesos slave objects, filtered by those which are acceptable
according to the provided blacklist and whitelists.
|
get_all_slaves_for_blacklist_whitelist
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_mesos_slaves_grouped_by_attribute(slaves, attribute):
    """Group mesos slaves by the value of one of their attributes.

    Slaves missing the attribute sort last and are dropped from the result,
    as is any falsy attribute value.

    :param slaves: a list of mesos slave dicts to group
    :param attribute: the attribute name to group by
    :returns: dict mapping each attribute value to the slaves carrying it
    """
    def attr_value(slave):
        return slave["attributes"].get(attribute)

    # Sort with missing (None) values last so groupby sees contiguous runs.
    ordered = sorted(
        slaves, key=lambda slave: (attr_value(slave) is None, attr_value(slave))
    )
    grouped = {}
    for value, members in itertools.groupby(ordered, key=attr_value):
        if value:
            grouped[value] = list(members)
    return grouped
|
Returns a dictionary of unique values and the corresponding hosts for a given Mesos attribute
:param slaves: a list of mesos slaves to group
:param attribute: an attribute to filter
:returns: a dictionary of the form {'<attribute_value>': [<list of hosts with attribute=attribute_value>]}
(response can contain multiple 'attribute_value' keys)
|
get_mesos_slaves_grouped_by_attribute
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def filter_mesos_slaves_by_blacklist(
    slaves, blacklist: DeployBlacklist, whitelist: DeployWhitelist
):
    """Keep only the slaves whose attributes pass both filters.

    The blacklist has the form ``[["location_type", "location"]]``, where an
    inner entry looks like ``["region", "uswest1-prod"]``.

    :returns: the list of mesos slaves after the filter
    """
    return [
        slave
        for slave in slaves
        if host_passes_blacklist(slave["attributes"], blacklist)
        and host_passes_whitelist(slave["attributes"], whitelist)
    ]
|
Takes an input list of slaves and filters them based on the given blacklist.
The blacklist is in the form of:
[["location_type", "location"]]
Where the list inside is something like ["region", "uswest1-prod"]
:returns: The list of mesos slaves after the filter
|
filter_mesos_slaves_by_blacklist
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
async def get_mesos_task_count_by_slave(
    mesos_state: MesosState,
    slaves_list: Sequence[Dict] = None,
    pool: Optional[str] = None,
) -> List[Dict]:
    """Get counts of running tasks per mesos slave.
    :param mesos_state: mesos state dict
    :param slaves_list: a list of slave dicts to count running tasks for.
    :param pool: pool of slaves to return (None means all)
    :returns: list of slave dicts {'task_count': SlaveTaskCount}
    """
    all_mesos_tasks = await get_all_running_tasks()  # empty string = all app ids
    # Index slaves by id so each task's slave lookup below is O(1).
    slaves = {
        slave["id"]: {"count": 0, "slave": slave}
        for slave in mesos_state.get("slaves", [])
    }
    for task in all_mesos_tasks:
        try:
            task_slave = await task.slave()
            if task_slave["id"] not in slaves:
                log.debug("Slave {} not found for task".format(task_slave["id"]))
                continue
            else:
                slaves[task_slave["id"]]["count"] += 1
                task_framework = await task.framework()
                log.debug(f"Task framework: {task_framework.name}")
        except SlaveDoesNotExist:
            # Task's slave vanished between state dump and lookup; skip it.
            log.debug(
                "Tried to get mesos slaves for task {}, but none existed.".format(
                    task["id"]
                )
            )
            continue
    if slaves_list:
        # Refresh counts on the caller-provided slave dicts, mutating in place.
        # NOTE(review): this reads slave["task_counts"].slave["id"], i.e. each
        # entry must already carry a SlaveTaskCount — confirm with callers.
        for slave in slaves_list:
            slave["task_counts"] = SlaveTaskCount(
                **slaves[slave["task_counts"].slave["id"]]
            )
        slaves_with_counts = list(slaves_list)
    elif pool:
        # Restrict to slaves in the requested pool ("default" when unset).
        slaves_with_counts = [
            {"task_counts": SlaveTaskCount(**slave_counts)}
            for slave_counts in slaves.values()
            if slave_counts["slave"]["attributes"].get("pool", "default") == pool
        ]
    else:
        slaves_with_counts = [
            {"task_counts": SlaveTaskCount(**slave_counts)}
            for slave_counts in slaves.values()
        ]
    for slave in slaves_with_counts:
        log.debug(
            "Slave: {}, running {} tasks".format(
                slave["task_counts"].slave["hostname"],
                slave["task_counts"].count,
            )
        )
    return slaves_with_counts
|
Get counts of running tasks per mesos slave.
:param mesos_state: mesos state dict
:param slaves_list: a list of slave dicts to count running tasks for.
:param pool: pool of slaves to return (None means all)
:returns: list of slave dicts {'task_count': SlaveTaskCount}
|
get_mesos_task_count_by_slave
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_count_running_tasks_on_slave(hostname: str) -> int:
    """Return the number of tasks running on a particular slave,
    or 0 if the slave is not found.

    :param hostname: hostname of the slave
    :returns: integer count of mesos tasks
    """
    mesos_state = a_sync.block(get_mesos_master().state_summary)
    slave_counts = a_sync.block(get_mesos_task_count_by_slave, mesos_state)
    # Return the count of the first matching slave; 0 when no slave matches.
    for entry in slave_counts:
        if entry["task_counts"].slave["hostname"] == hostname:
            return entry["task_counts"].count
    return 0
|
Return the number of tasks running on a particular slave
or 0 if the slave is not found.
:param hostname: hostname of the slave
:returns: integer count of mesos tasks
|
get_count_running_tasks_on_slave
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def mesos_services_running_here(
    framework_filter, parse_service_instance_from_executor_id, hostname=None
):
    """List the paasta_native services currently run by the mesos-slave on a host.

    :param framework_filter: predicate deciding which frameworks to consider.
    :param parse_service_instance_from_executor_id: callable returning a
        (service, instance) tuple for an executor ID; may raise ValueError.
    :param hostname: host to fetch mesos slave state from (see get_local_slave_state).
    :returns: a list of (service, instance, port) triples
    """
    slave_state = get_local_slave_state(hostname=hostname)
    running_services = []
    for framework in slave_state.get("frameworks", []):
        if not framework_filter(framework):
            continue
        for executor in framework.get("executors", []):
            # Only consider executors with at least one running task.
            if not any(
                task["state"] == "TASK_RUNNING" for task in executor.get("tasks", [])
            ):
                continue
            try:
                srv_name, srv_instance = parse_service_instance_from_executor_id(
                    executor["id"]
                )
            except ValueError:
                log.error(
                    "Failed to decode paasta service instance from {}".format(
                        executor["id"]
                    )
                )
                continue
            if "ports" in executor["resources"]:
                srv_port = int(re.findall("[0-9]+", executor["resources"]["ports"])[0])
            else:
                srv_port = None
            running_services.append((srv_name, srv_instance, srv_port))
    return running_services
|
See what paasta_native services are being run by a mesos-slave on this host.
:param framework_filter: a function that returns true if we should consider a given framework.
:param parse_service_instance_from_executor_id: A function that returns a tuple of (service, instance) from the
executor ID.
:param hostname: Hostname to fetch mesos slave state from. See get_local_slave_state.
:returns: A list of triples of (service, instance, port)
|
mesos_services_running_here
|
python
|
Yelp/paasta
|
paasta_tools/mesos_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/mesos_tools.py
|
Apache-2.0
|
def get_sensu_team_data(team):
    """Look up the Sensu configuration settings for a team.

    Data format: https://github.com/Yelp/sensu_handlers#teams

    Returns an empty dictionary if there is nothing for that team. Not every
    team specifies every setting (e.g. a team may omit `notification_email`);
    callers must handle missing keys themselves.
    """
    all_team_data = _load_sensu_team_data()["team_data"]
    return all_team_data.get(team, {})
|
Takes a team and returns the dictionary of Sensu configuration
settings for that team. The data is in this format:
https://github.com/Yelp/sensu_handlers#teams
Returns an empty dictionary if there is nothing to return.
Not all teams specify all the different types of configuration settings.
for example, a team may not specify a `notification_email`. It is up
to the caller of this function to handle that case.
|
get_sensu_team_data
|
python
|
Yelp/paasta
|
paasta_tools/monitoring_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py
|
Apache-2.0
|
def read_monitoring_config(service, soa_dir=DEFAULT_SOA_DIR):
    """Read a service's monitoring.yaml file.

    :param service: The service name
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in soa_dir/name/monitoring.yaml
    """
    monitoring_file = os.path.join(
        os.path.abspath(soa_dir), service, "monitoring.yaml"
    )
    return service_configuration_lib.read_monitoring(monitoring_file)
|
Read a service's monitoring.yaml file.
:param service: The service name
:param soa_dir: THe SOA configuration directory to read from
:returns: A dictionary of whatever was in soa_dir/name/monitoring.yaml
|
read_monitoring_config
|
python
|
Yelp/paasta
|
paasta_tools/monitoring_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py
|
Apache-2.0
|
def list_teams():
    """Load team data from the system and return the set of team names
    (possibly empty).
    """
    return set(_load_sensu_team_data().get("team_data", {}).keys())
|
Loads team data from the system. Returns a set of team names (or empty
set).
|
list_teams
|
python
|
Yelp/paasta
|
paasta_tools/monitoring_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py
|
Apache-2.0
|
def check_under_replication(
    instance_config: LongRunningServiceConfig,
    expected_count: int,
    num_available: int,
    sub_component: Optional[str] = None,
) -> Tuple[bool, str, str]:
    """Check if a component/sub_component is under-replicated and returns both the result of the check in the form of a
    boolean and a human-readable text to be used in logging or monitoring events.

    :param instance_config: config of the instance being checked
    :param expected_count: number of replicas requested
    :param num_available: number of replicas currently available
    :param sub_component: optional sub-component name to mention in the output
    :returns: (under_replicated, short output line, long description)
    """
    crit_threshold = instance_config.get_replication_crit_percentage()
    # Keep output short, with rest of context in description. This is because
    # by default, Slack-Sensu messages have a 400 char limit, incl. the output.
    # If it is too long, the runbook and tip won't show up.
    if sub_component is not None:
        output = ("{} has {}/{} replicas of {} available (threshold: {}%)").format(
            instance_config.job_id,
            num_available,
            expected_count,
            sub_component,
            crit_threshold,
        )
    else:
        output = ("{} has {}/{} replicas available (threshold: {}%)").format(
            instance_config.job_id, num_available, expected_count, crit_threshold
        )
    under_replicated, _ = is_under_replicated(
        num_available, expected_count, crit_threshold
    )
    # The long description guides the on-call engineer; only the failure case
    # needs the remediation steps.
    if under_replicated:
        description = (
            "This replication alert means that PaaSTA can't keep the\n"
            "requested number of replicas up and healthy in the cluster for "
            "the instance {service}.{instance}.\n"
            "\n"
            "Reasons this might be happening:\n"
            "\n"
            "  The service may simply be unhealthy. There also may not be enough resources\n"
            "  in the cluster to support the requested instance count.\n"
            "\n"
            "Things you can do:\n"
            "\n"
            "  * Increase the instance count\n"
            "  * Fix the cause of the unhealthy service. Try running:\n"
            "\n"
            "      paasta status -s {service} -i {instance} -c {cluster} -vv\n"
        ).format(
            service=instance_config.service,
            instance=instance_config.instance,
            cluster=instance_config.cluster,
        )
    else:
        description = (
            "{} is well-replicated because it has over {}% of its "
            "expected replicas up."
        ).format(instance_config.job_id, crit_threshold)
    return under_replicated, output, description
|
Check if a component/sub_component is under-replicated and returns both the result of the check in the form of a
boolean and a human-readable text to be used in logging or monitoring events.
|
check_under_replication
|
python
|
Yelp/paasta
|
paasta_tools/monitoring_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/monitoring_tools.py
|
Apache-2.0
|
def load_monkrelaycluster_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> MonkRelayClusterDeploymentConfig:
    """Read a service instance's configuration for MonkRelayCluster.
    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.
    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
        should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    # Service-wide defaults from the service's base configuration.
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    # Instance-specific settings override the service-wide defaults.
    instance_config = load_service_instance_config(
        service, instance, "monkrelays", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Build a temporary config first: branch/deploy-group resolution may
        # depend on the merged config, so we need a config object to ask.
        temp_instance_config = MonkRelayClusterDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return MonkRelayClusterDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
|
Read a service instance's configuration for MonkRelayCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance
|
load_monkrelaycluster_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/monkrelaycluster_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/monkrelaycluster_tools.py
|
Apache-2.0
|
def load_nrtsearchserviceeks_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> NrtsearchServiceEksDeploymentConfig:
    """Read a service instance's configuration for Nrtsearch.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: Whether the corresponding deployments.json for this
        service should also be loaded to fill in the branch dict
    :param soa_dir: The SOA configuration directory to read from
    :returns: An NrtsearchServiceEksDeploymentConfig for the service instance
    """
    base_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_overrides = load_service_instance_config(
        service, instance, "nrtsearchserviceeks", cluster, soa_dir=soa_dir
    )
    merged_config = deep_merge_dictionaries(
        overrides=instance_overrides, defaults=base_config
    )

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Build a throwaway config first: the branch/deploy-group getters need a
        # config object, but the final object needs the branch dict they produce.
        bootstrap_config = NrtsearchServiceEksDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch_dict = deployments_json.get_branch_dict(
            service,
            bootstrap_config.get_branch(),
            bootstrap_config.get_deploy_group(),
        )

    return NrtsearchServiceEksDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
|
Read a service instance's configuration for Nrtsearch.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
:returns: A dictionary of whatever was in the config for the service instance
|
load_nrtsearchserviceeks_instance_config
|
python
|
Yelp/paasta
|
paasta_tools/nrtsearchserviceeks_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/nrtsearchserviceeks_tools.py
|
Apache-2.0
|
def log_to_paasta(log_line):
    """Add the event to the standard PaaSTA logging backend."""
    # Name the killed process if we know it; otherwise stay generic.
    if log_line.process_name:
        victim = "a %s process" % log_line.process_name
    else:
        victim = "a process"
    line = (
        f"oom-killer killed {victim} on {log_line.hostname} "
        f"(container_id: {log_line.container_id})."
    )
    _log(
        service=log_line.service,
        instance=log_line.instance,
        component="oom",
        cluster=log_line.cluster,
        level=DEFAULT_LOGLEVEL,
        line=line,
    )
|
Add the event to the standard PaaSTA logging backend.
|
log_to_paasta
|
python
|
Yelp/paasta
|
paasta_tools/oom_logger.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/oom_logger.py
|
Apache-2.0
|
def clusters(self) -> Iterable[str]:
    """Yield the name of every cluster this service is deployed to.

    The cluster list is computed lazily on first use and cached on the
    instance for subsequent calls.

    :returns: iterator over cluster name strings.
    """
    if self._clusters is None:
        self._clusters = list_clusters(service=self._service, soa_dir=self._soa_dir)
    yield from self._clusters
|
Returns an iterator that yields cluster names for the service.
:returns: iterator that yields cluster names.
|
clusters
|
python
|
Yelp/paasta
|
paasta_tools/paasta_service_config_loader.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py
|
Apache-2.0
|
def instances(
    self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[str]:
    """Yield the instance names configured for one cluster/instance-type pair.

    :param cluster: The cluster name
    :param instance_type_class: a subclass of InstanceConfig
    :returns: an iterator over instance name strings
    """
    key = (cluster, instance_type_class)
    if key not in self._framework_configs:
        self._refresh_framework_config(cluster, instance_type_class)
    yield from self._framework_configs.get(key, [])
|
Returns an iterator that yields instance names as strings.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instance names
|
instances
|
python
|
Yelp/paasta
|
paasta_tools/paasta_service_config_loader.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py
|
Apache-2.0
|
def instance_configs(
    self, cluster: str, instance_type_class: Type[InstanceConfig_T]
) -> Iterable[InstanceConfig_T]:
    """Yield fully-built InstanceConfig objects for one cluster/instance-type pair.

    Instances without deployment information are silently skipped.

    :param cluster: The cluster name
    :param instance_type_class: a subclass of InstanceConfig
    :returns: an iterator yielding instances of KubernetesDeploymentConfig, etc.
    :raises NotImplementedError: when it doesn't know how to create a config for instance_type_class
    """
    key = (cluster, instance_type_class)
    if key not in self._framework_configs:
        self._refresh_framework_config(cluster, instance_type_class)
    for instance, config in self._framework_configs.get(key, {}).items():
        try:
            yield self._create_service_config(
                cluster, instance, config, instance_type_class
            )
        except NoDeploymentsAvailable:
            # No deployments.json entry for this instance yet; skip it.
            continue
|
Returns an iterator that yields InstanceConfig objects.
:param cluster: The cluster name
:param instance_type_class: a subclass of InstanceConfig
:returns: an iterator that yields instances of KubernetesDeploymentConfig, etc.
:raises NotImplementedError: when it doesn't know how to create a config for instance_type_class
|
instance_configs
|
python
|
Yelp/paasta
|
paasta_tools/paasta_service_config_loader.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py
|
Apache-2.0
|
def _create_service_config(
    self,
    cluster: str,
    instance: str,
    config: utils.InstanceConfigDict,
    config_class: Type[InstanceConfig_T],
) -> InstanceConfig_T:
    """Build a service instance's configuration object for kubernetes.

    :param cluster: The cluster to read the configuration for
    :param instance: The instance of the service to retrieve
    :param config: the framework instance config.
    :returns: An instance of config_class
    """
    merged_config = self._get_merged_config(config)
    # A bootstrap config (without a branch dict) is needed first so that the
    # branch-dict lookup can interrogate it for branch/deploy-group info.
    bootstrap_config = config_class(
        service=self._service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=None,
        soa_dir=self._soa_dir,
    )
    return config_class(
        service=self._service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=self._get_branch_dict(cluster, instance, bootstrap_config),
        soa_dir=self._soa_dir,
    )
|
Create a service instance's configuration for kubernetes.
:param cluster: The cluster to read the configuration for
:param instance: The instance of the service to retrieve
:param config: the framework instance config.
:returns: An instance of config_class
|
_create_service_config
|
python
|
Yelp/paasta
|
paasta_tools/paasta_service_config_loader.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/paasta_service_config_loader.py
|
Apache-2.0
|
def _make_determine_wants_func(ref_mutator):
    """Wrap ref_mutator in a form safe to pass as ``determine_wants`` to
    dulwich's send_pack.

    The wrapper feeds ref_mutator a str->str view of the remote refs and then
    re-applies the original refs on top of its output, so existing refs can
    never be deleted or rewritten - only new refs can be added.
    """

    def determine_wants(old_refs):
        decoded = {
            name.decode("UTF-8"): sha.decode("UTF-8")
            for name, sha in old_refs.items()
        }
        mutated = ref_mutator(decoded)
        encoded = {
            name.encode("UTF-8"): sha.encode("UTF-8")
            for name, sha in mutated.items()
        }
        # Existing refs always win: make sure we don't delete/modify anything.
        encoded.update(old_refs)
        return encoded

    return determine_wants
|
Returns a safer version of ref_mutator, suitable for passing as the
determine_wants argument to dulwich's send_pack method. The returned
function will not delete or modify any existing refs.
|
_make_determine_wants_func
|
python
|
Yelp/paasta
|
paasta_tools/remote_git.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py
|
Apache-2.0
|
def make_force_push_mutate_refs_func(targets, sha):
    """Create a 'force push' function that tells send_pack to point every
    branch/tag in ``targets`` at ``sha``.

    :param targets: List of branches/tags to point at the input sha
    :param sha: The git sha to point the branches/tags at
    :returns: A ref-manipulation function usable by a dulwich client
    """
    encoded_sha = sha.encode("UTF-8")

    def mutate_refs(refs):
        # Mutates the refs dict in place and returns it, as dulwich expects.
        refs.update({target.encode("UTF-8"): encoded_sha for target in targets})
        return refs

    return mutate_refs
|
Create a 'force push' function that will inform send_pack that we want
to mark a certain list of target branches/tags to point to a particular
git_sha.
:param targets: List of branches/tags to point at the input sha
:param sha: The git sha to point the branches/tags at
:returns: A function to do the ref manipulation that a dulwich client can use
|
make_force_push_mutate_refs_func
|
python
|
Yelp/paasta
|
paasta_tools/remote_git.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py
|
Apache-2.0
|
def create_remote_refs(git_url, ref_mutator, force=False):
    """Create refs (tags, branches) on a remote git repo.

    :param git_url: the URL or path to the remote git repo.
    :param ref_mutator: A function that determines the new refs to create on
                        the remote repo. This gets passed a dictionary of the
                        remote server's refs in the format {name : hash, ...},
                        and should return a dictionary of the same format.
    :param force: Bool, defaults to false. If true we will overwrite
                  refs even if they are already set.
    :returns: The map of refs, with our changes applied.
    """
    client, path = dulwich.client.get_transport_and_path(git_url)
    # Only wrap the mutator in the non-destructive guard when force is the
    # literal False (identity check preserved from the original behavior).
    if force is False:
        determine_wants = _make_determine_wants_func(ref_mutator)
    else:
        determine_wants = ref_mutator

    # We know we don't need to push any objects, only refs.
    def generate_pack_contents(have, want):
        return []

    return client.send_pack(path, determine_wants, generate_pack_contents)
|
Creates refs (tags, branches) on a remote git repo.
:param git_url: the URL or path to the remote git repo.
:param ref_mutator: A function that determines the new refs to create on
the remote repo. This gets passed a dictionary of the
remote server's refs in the format {name : hash, ...},
and should return a dictionary of the same format.
:param force: Bool, defaults to false. If true we will overwrite
refs even if they are already set.
:returns: The map of refs, with our changes applied.
|
create_remote_refs
|
python
|
Yelp/paasta
|
paasta_tools/remote_git.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py
|
Apache-2.0
|
def list_remote_refs(git_url):
    """Return the refs of a remote git repo as a {name: hash} dict of strings.

    :raises LSRemoteException: if the remote hangs up during the fetch.
    """
    client, path = dulwich.client.get_transport_and_path(git_url)
    try:
        raw_refs = client.fetch_pack(path, lambda refs: [], None, lambda data: None)
    except dulwich.errors.HangupException as e:
        raise LSRemoteException(f"Unable to fetch remote refs from {git_url}: {e}")
    return {
        name.decode("UTF-8"): sha.decode("UTF-8") for name, sha in raw_refs.items()
    }
|
Get the refs from a remote git repo as a dictionary of name->hash.
|
list_remote_refs
|
python
|
Yelp/paasta
|
paasta_tools/remote_git.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py
|
Apache-2.0
|
def get_authors(git_url, from_sha, to_sha):
    """Get the list of authors who contributed to a git changeset.

    Currently only supports fetching this in a very "yelpy" way by
    executing a gitolite command over ssh.

    :returns: a (returncode, output) tuple; returncode is nonzero on failure.
    """
    parsed = re.match("(?P<git_server>.*):(?P<git_repo>.*)", git_url)
    if parsed is None:
        return (1, f"could not understand the git url {git_url} for authors detection")
    git_server = parsed.group("git_server")
    git_repo = parsed.group("git_repo")
    if git_server is None:
        return (
            1,
            f"could not understand the git server in {git_url} for authors detection",
        )
    if git_repo is None:
        return (
            1,
            f"could not understand the git repo in {git_url} for authors detection",
        )
    if "git.yelpcorp.com" not in git_server:
        # TODO: PAASTA-16927: support getting authors for services on GHE
        return 1, f"Fetching authors not supported for {git_server}"
    ssh_command = (
        f"ssh {git_server} authors-of-changeset {git_repo} {from_sha} {to_sha}"
    )
    return _run(command=ssh_command, timeout=5.0)
|
Gets the list of authors who contributed to a git changeset.
Currently only supports fetching this in a very "yelpy" way by
executing a gitolite command
|
get_authors
|
python
|
Yelp/paasta
|
paasta_tools/remote_git.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/remote_git.py
|
Apache-2.0
|
def is_shared_secret_from_secret_name(soa_dir: str, secret_name: str) -> bool:
    """Alternative way of figuring out if a secret is shared, directly from the
    secret_name: check whether the shared-secrets service has a secret file for
    it on disk."""
    return os.path.isfile(
        os.path.join(soa_dir, SHARED_SECRET_SERVICE, "secrets", f"{secret_name}.json")
    )
|
Alternative way of figuring if a secret is shared, directly from the secret_name.
|
is_shared_secret_from_secret_name
|
python
|
Yelp/paasta
|
paasta_tools/secret_tools.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/secret_tools.py
|
Apache-2.0
|
def get_hpa_overrides(kube_client: KubeClient) -> Dict[str, Dict[str, HpaOverride]]:
    """
    Load autoscaling overrides from the ConfigMap once.

    This function reads the "paasta-autoscaling-overrides" ConfigMap in the "paasta"
    namespace and extracts all valid (non-expired) overrides to return a dictionary
    mapping service names to instance names to override data (currently, just
    min_instances and when the override should expire by).

    The incoming ConfigMap data maps "$SERVICE.$INSTANCE" keys to JSON strings of
    the form:

        {
            "min_instances": 2,
            "expire_after": 1696118400
        }

    where "expire_after" is a Unix timestamp (seconds since the epoch), since it is
    compared directly against time.time().

    :param kube_client: client used to read the ConfigMap.
    :returns: {service: {instance: HpaOverride}} for all non-expired overrides;
        empty dict if the ConfigMap is missing or unreadable.
    """
    overrides: Dict[str, Dict[str, HpaOverride]] = {}
    try:
        configmap = get_namespaced_configmap(
            name=AUTOSCALING_OVERRIDES_CONFIGMAP_NAME,
            namespace=AUTOSCALING_OVERRIDES_CONFIGMAP_NAMESPACE,
            kube_client=kube_client,
        )
        if configmap and configmap.data:
            current_time = time.time()
            for service_instance, override_json in configmap.data.items():
                try:
                    service, instance = service_instance.split(".")
                    override_metadata = json.loads(override_json)
                    expire_after = override_metadata.get("expire_after")
                    min_instances = override_metadata.get("min_instances")
                    # NOTE: a min_instances of 0 is treated as missing here (falsy),
                    # matching the original behavior.
                    if expire_after and min_instances:
                        if current_time < expire_after:
                            overrides.setdefault(service, {})[instance] = {
                                "min_instances": min_instances,
                                "expire_after": expire_after,
                            }
                            log.info(
                                f"Found valid HPA override for {service}: "
                                f"{min_instances} (expires at {expire_after})"
                            )
                        else:
                            log.info(
                                f"Ignoring expired HPA override for {service}.{instance} "
                                f"(expired at {expire_after})"
                            )
                    else:
                        log.warning(
                            f"Invalid HPA override for {service}.{instance}: "
                            f"missing 'min_instances' or 'expire_after': {override_metadata}"
                        )
                except Exception:
                    # Use service_instance here: `service` is unbound (or stale from a
                    # previous iteration) when the split() above is what raised.
                    log.exception(
                        f"Error parsing override for {service_instance} - proceeding without overrides for this service."
                    )
    except Exception:
        # If ConfigMap doesn't exist or there's an error, just return empty dict
        log.exception(
            f"Unable to load the {AUTOSCALING_OVERRIDES_CONFIGMAP_NAME} ConfigMap - proceeding without overrides"
        )
    return overrides
|
Load autoscaling overrides from the ConfigMap once.
This function reads the "paasta-autoscaling-overrides" ConfigMap in the "paasta" namespace
and extracts all valid (non-expired) overrides to return a dictionary mapping
service.instance pairs to override data (currently, just min_instances and when the
override should expire by).
The incoming ConfigMap is expected to have the following format:
{
$SERVICE_A.$INSTANCE_A: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_A.$INSTANCE_B: {
"min_instances": 3,
"expire_after": "2023-10-01T00:00:00Z"
},
...
},
$SERVICE_B.$INSTANCE_A: {
"min_instances": 1,
"expire_after": "2023-10-01T00:00:00Z"
},
$SERVICE_B.$INSTANCE_B: {
"min_instances": 2,
"expire_after": "2023-10-01T00:00:00Z"
},
...
}
|
get_hpa_overrides
|
python
|
Yelp/paasta
|
paasta_tools/setup_kubernetes_job.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_kubernetes_job.py
|
Apache-2.0
|
def _minify_promql(query: str) -> str:
    """
    Collapse most whitespace in a PromQL query.

    This lets us format queries nicely in code while minimizing the payload
    actually sent to Prometheus by the adapter: each line is stripped of
    leading/trailing whitespace and lines are joined with single spaces.
    Whitespace inside a line is left untouched so label values are never
    altered.
    """
    stripped_lines = [line.strip() for line in query.split("\n")]
    return " ".join(stripped_lines).strip()
|
Given a PromQL query, return the same query with most whitespace collapsed.
This is useful for allowing us to nicely format queries in code, but minimize the size of our
queries when they're actually sent to Prometheus by the adapter.
|
_minify_promql
|
python
|
Yelp/paasta
|
paasta_tools/setup_prometheus_adapter_config.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py
|
Apache-2.0
|
def create_instance_active_requests_scaling_rule(
    service: str,
    instance_config: KubernetesDeploymentConfig,
    metrics_provider_config: MetricsProviderDict,
    paasta_cluster: str,
) -> PrometheusAdapterRule:
    """
    Creates a Prometheus adapter rule config for a given service instance.

    The generated rule exposes a per-deployment metric named
    "<deployment>-active-requests-prom" whose value is (desired replicas based on
    Envoy active requests) / (current replicas), so the HPA scales the deployment
    until the ratio reaches 1.

    :param service: PaaSTA service name.
    :param instance_config: Kubernetes deployment config for the instance.
    :param metrics_provider_config: active-requests provider settings; reads
        'desired_active_requests_per_replica' and 'moving_average_window_seconds'
        (both optional, with module-level defaults).
    :param paasta_cluster: name of the PaaSTA cluster the rule targets.
    :returns: a PrometheusAdapterRule dict with name/seriesQuery/resources/metricsQuery.
    """
    instance = instance_config.instance
    namespace = instance_config.get_namespace()
    desired_active_requests_per_replica = metrics_provider_config.get(
        "desired_active_requests_per_replica",
        DEFAULT_DESIRED_ACTIVE_REQUESTS_PER_REPLICA,
    )
    moving_average_window = metrics_provider_config.get(
        "moving_average_window_seconds",
        DEFAULT_ACTIVE_REQUESTS_AUTOSCALING_MOVING_AVERAGE_WINDOW,
    )
    deployment_name = get_kubernetes_app_name(service=service, instance=instance)
    # In order for autoscaling to work safely while a service migrates from one namespace to another, the HPA needs to
    # make sure that the deployment in the new namespace is scaled up enough to handle _all_ the load.
    # This is because once the new deployment is 100% healthy, cleanup_kubernetes_job will delete the deployment out of
    # the old namespace all at once, suddenly putting all the load onto the deployment in the new namespace.
    # To ensure this, we must:
    #  - DO NOT filter on namespace in worker_filter_terms (which is used when calculating desired_instances).
    #  - DO filter on namespace in replica_filter_terms (which is used to calculate current_replicas).
    # This makes sure that desired_instances includes load from all namespaces, but that the scaling ratio calculated
    # by (desired_instances / current_replicas) is meaningful for each namespace.
    worker_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{instance}'"
    replica_filter_terms = f"paasta_cluster='{paasta_cluster}',deployment='{deployment_name}',namespace='{namespace}'"
    # Current replica count per deployment; the max_over_time branch fills in values
    # when the primary series is absent (extrapolation over short metric gaps).
    current_replicas = f"""
        sum(
            label_join(
                (
                    kube_deployment_spec_replicas{{{replica_filter_terms}}} >= 0
                    or
                    max_over_time(
                        kube_deployment_spec_replicas{{{replica_filter_terms}}}[{DEFAULT_EXTRAPOLATION_TIME}s]
                    )
                ),
                "kube_deployment", "", "deployment"
            )
        ) by (kube_deployment)
    """
    # Envoy tracks metrics at the smartstack namespace level. In most cases the paasta instance name matches the smartstack namespace.
    # In rare cases, there are custom registration added to instance configs.
    # If there is no custom registration the envoy and instance names match and no need to update the worker_filter_terms.
    # If there is a single custom registration for an instance, we will process the registration value and extract the value to be used.
    # The registrations usually follow the format of {service_name}.{smartstack_name}. Hence we split the string by dot and extract the last token.
    # More than one custom registrations are not supported and config validation takes care of rejecting such configs.
    registrations = instance_config.get_registrations()
    mesh_instance = registrations[0].split(".")[-1] if len(registrations) == 1 else None
    envoy_filter_terms = f"paasta_cluster='{paasta_cluster}',paasta_service='{service}',paasta_instance='{mesh_instance or instance}'"
    # envoy-based metrics have no labels corresponding to the k8s resources that they
    # front, but we can trivially add one in since our deployment names are of the form
    # {service_name}-{instance_name} - which are both things in `worker_filter_terms` so
    # it's safe to unconditionally add.
    # This is necessary as otherwise the HPA/prometheus adapter does not know what these
    # metrics are for.
    total_load = f"""
        (
            sum(
                label_replace(
                    paasta_instance:envoy_cluster__egress_cluster_upstream_rq_active{{{envoy_filter_terms}}},
                    "kube_deployment", "{deployment_name}", "", ""
                )
            ) by (kube_deployment)
        )
    """
    desired_instances_at_each_point_in_time = f"""
        {total_load} / {desired_active_requests_per_replica}
    """
    # Smooth the desired replica count over the configured window to avoid flapping.
    desired_instances = f"""
        avg_over_time(
            (
                {desired_instances_at_each_point_in_time}
            )[{moving_average_window}s:]
        )
    """
    # The prometheus HPA adapter needs kube_deployment and kube_namespace labels attached to the metrics its scaling on.
    # The envoy-based metrics have no labels corresponding to the k8s resources, so we can add them in.
    metrics_query = f"""
        label_replace(
            label_replace(
                {desired_instances} / {current_replicas},
                "kube_deployment", "{deployment_name}", "", ""
            ),
            "kube_namespace", "{namespace}", "", ""
        )
    """
    series_query = f"""
        k8s:deployment:pods_status_ready{{{worker_filter_terms}}}
    """
    metric_name = f"{deployment_name}-active-requests-prom"
    return {
        "name": {"as": metric_name},
        "seriesQuery": _minify_promql(series_query),
        "resources": {"template": "kube_<<.Resource>>"},
        "metricsQuery": _minify_promql(metrics_query),
    }
|
Creates a Prometheus adapter rule config for a given service instance.
|
create_instance_active_requests_scaling_rule
|
python
|
Yelp/paasta
|
paasta_tools/setup_prometheus_adapter_config.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py
|
Apache-2.0
|
def get_rules_for_service_instance(
    service_name: str,
    instance_config: KubernetesDeploymentConfig,
    paasta_cluster: str,
) -> List[PrometheusAdapterRule]:
    """
    Build the list of Prometheus Adapter rules for a given service instance:
    one rule per metrics provider that has Prometheus-based autoscaling
    configured (providers that produce no rule are skipped).
    """
    rules: List[PrometheusAdapterRule] = []
    for provider_type in ALL_METRICS_PROVIDERS:
        provider_config = instance_config.get_autoscaling_metrics_provider(
            provider_type
        )
        if provider_config is None:
            log.debug(
                f"Skipping {service_name}.{instance_config.instance} - no Prometheus-based autoscaling configured for {provider_type}"
            )
            continue
        rule = create_instance_scaling_rule(
            service=service_name,
            instance_config=instance_config,
            metrics_provider_config=provider_config,
            paasta_cluster=paasta_cluster,
        )
        if rule is not None:
            rules.append(rule)
    return rules
|
Returns a list of Prometheus Adapter rules for a given service instance. For now, this
will always be a 0 or 1-element list - but when we support scaling on multiple metrics
we will return N rules for a given service instance.
|
get_rules_for_service_instance
|
python
|
Yelp/paasta
|
paasta_tools/setup_prometheus_adapter_config.py
|
https://github.com/Yelp/paasta/blob/master/paasta_tools/setup_prometheus_adapter_config.py
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.