Add support for interim Ubuntu releases
- update bundles to include UCA pocket tests
- update test configuration
- update metadata to include kinetic and lunar
- update snapcraft to allow run-on for kinetic and lunar
- sync charm-helpers

Change-Id: Id8bbaf1402935b88f14ecd6f736697694449e417
ChrisMacNaughton authored and lmlg committed May 4, 2023
1 parent 96a6dae commit 2bce537
Showing 33 changed files with 1,064 additions and 69 deletions.
1 change: 1 addition & 0 deletions .zuul.yaml
@@ -1,3 +1,4 @@
- project:
templates:
- openstack-python3-charm-zed-jobs
- openstack-python3-charm-jobs
6 changes: 6 additions & 0 deletions charmcraft.yaml
@@ -30,3 +30,9 @@ bases:
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.10"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "23.04"
architectures: [amd64, s390x, ppc64el, arm64]
16 changes: 16 additions & 0 deletions hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -19,6 +19,7 @@

import glob
import grp
import json
import os
import pwd
import re
@@ -30,6 +31,7 @@
from charmhelpers.core.hookenv import (
application_name,
config,
ERROR,
hook_name,
local_unit,
log,
@@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
# check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
# just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
for rid in relation_ids("ha"):
ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
if ha_resources:
try:
ha_resources_parsed = json.loads(ha_resources)
except ValueError as e:
log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
raise
if "lsb:haproxy" in ha_resources_parsed.values():
if "haproxy" in services:
log("removed check_haproxy. This service will be monitored by check_crm")
services.remove("haproxy")
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
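For illustration only, the effect of the new haproxy check added above on some hypothetical hacluster relation data (the resource and service names below are invented, not taken from this commit):

import json

# Hypothetical 'json_resources' value published by hacluster on the ha relation
ha_resources = json.dumps({"res_myservice_haproxy": "lsb:haproxy"})

services = ["haproxy", "my-service"]
parsed = json.loads(ha_resources)
if "lsb:haproxy" in parsed.values() and "haproxy" in services:
    # haproxy is managed as an lsb resource, so check_crm already covers it
    services.remove("haproxy")

print(services)  # ['my-service']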
2 changes: 1 addition & 1 deletion hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -324,7 +324,7 @@ def valid_hacluster_config():
'''
vip = config_get('vip')
dns = config_get('dns-ha')
if not(bool(vip) ^ bool(dns)):
if not (bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)
2 changes: 1 addition & 1 deletion hooks/charmhelpers/contrib/network/ip.py
@@ -539,7 +539,7 @@ def port_has_listener(address, port):
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not(bool(result))
return not (bool(result))


def assert_charm_supports_ipv6():
149 changes: 117 additions & 32 deletions hooks/charmhelpers/contrib/openstack/context.py
@@ -25,6 +25,7 @@
import time

from base64 import b64decode
from distutils.version import LooseVersion
from subprocess import (
check_call,
check_output,
@@ -39,6 +40,7 @@
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
get_installed_version,
)
from charmhelpers.core.hookenv import (
NoNetworkBinding,
@@ -59,6 +61,7 @@
network_get_primary_address,
WARNING,
service_name,
remote_service_name,
)

from charmhelpers.core.sysctl import create as sysctl_create
@@ -130,6 +133,7 @@
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404


def ensure_packages(packages):
@@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):

class IdentityServiceContext(OSContextGenerator):

_forward_compat_remaps = {
'admin_user': 'admin-user-name',
'service_username': 'service-user-name',
'service_tenant': 'service-project-name',
'service_tenant_id': 'service-project-id',
'service_domain': 'service-domain-name',
}

def __init__(self,
service=None,
service_user=None,
@@ -397,23 +409,34 @@ def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel):
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
# see keystonemiddleware upstream sources for more info
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
if 'public_auth_url' in ctxt:
c.update((
('www_authenticate_uri', '{}/v3'.format(
ctxt.get('public_auth_url'))),))
else:
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
else:
c.update((
('auth_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))

if 'internal_auth_url' in ctxt:
c.update((
('auth_url', ctxt.get('internal_auth_url')),))
else:
c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),))

c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),
('project_domain_name', ctxt.get('admin_domain_name', '')),
('user_domain_name', ctxt.get('admin_domain_name', '')),
('project_name', ctxt.get('admin_tenant_name', '')),
@@ -441,39 +464,86 @@ def __call__(self):
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = {}
# NOTE(jamespage):
# forwards compat with application data
# bag driven approach to relation.
_adata = relation_get(rid=rid, app=remote_service_name(rid))
adata = {}
# if no app data bag presented - fallback
# to legacy unit based relation data
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
if _adata:
# New app data bag uses - instead of _
# in key names - remap for compat with
# existing relation data keys
for key, value in _adata.items():
if key == 'api-version':
adata[key.replace('-', '_')] = value.strip('v')
else:
adata[key.replace('-', '_')] = value
# Re-map some keys for backwards compatibility
for target, source in self._forward_compat_remaps.items():
adata[target] = _adata.get(source)
# Now preferentially get data from the app data bag, but if
# it's not available, get it from the legacy based relation
# data.

def _resolve(key):
return adata.get(key) or rdata.get(key)

serv_host = _resolve('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = _resolve('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = _resolve('internal_host',)
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
svc_protocol = _resolve('service_protocol') or 'http'
auth_protocol = _resolve('auth_protocol') or 'http'
admin_role = _resolve('admin_role') or 'Admin'
int_protocol = _resolve('internal_protocol') or 'http'
api_version = _resolve('api_version') or '2.0'
ctxt.update({'service_port': _resolve('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'auth_port': _resolve('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'internal_port': _resolve('internal_port'),
'admin_tenant_name': _resolve('service_tenant'),
'admin_user': _resolve('service_username'),
'admin_password': _resolve('service_password'),
'admin_role': admin_role,
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version})

if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
service_type = _resolve('service_type')
if service_type:
ctxt['service_type'] = service_type

if float(api_version) > 2:
ctxt.update({
'admin_domain_name': rdata.get('service_domain'),
'service_project_id': rdata.get('service_tenant_id'),
'service_domain_id': rdata.get('service_domain_id')})
'admin_domain_name': _resolve('service_domain'),
'service_project_id': _resolve('service_tenant_id'),
'service_domain_id': _resolve('service_domain_id')})

# NOTE:
# keystone-k8s operator presents full URLS
# for all three endpoints - public and internal are
# externally addressable for machine based charm
public_auth_url = _resolve('public_auth_url')
# if 'public_auth_url' in rdata:
if public_auth_url:
ctxt.update({
'public_auth_url': public_auth_url,
})
internal_auth_url = _resolve('internal_auth_url')
# if 'internal_auth_url' in rdata:
if internal_auth_url:
ctxt.update({
'internal_auth_url': internal_auth_url,
})

# we keep all variables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
Expand All @@ -487,8 +557,8 @@ def __call__(self):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
# upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
ctxt['admin_domain_id'] = rdata.get('service_domain_id')
ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
ctxt['admin_domain_id'] = _resolve('service_domain_id')
return ctxt

return {}
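A minimal standalone sketch of the remapping and fallback logic above, using invented data; the extra _forward_compat_remaps pass (admin_user, service_username, and friends) is omitted here for brevity:

# Hypothetical app data bag published by keystone (dash-separated keys)
_adata = {"service-user-name": "cinder", "api-version": "v3"}
# Hypothetical legacy unit relation data (underscore keys)
rdata = {"service_username": "cinder-old", "service_host": "10.0.0.10"}

adata = {}
for key, value in _adata.items():
    if key == "api-version":
        adata[key.replace("-", "_")] = value.strip("v")
    else:
        adata[key.replace("-", "_")] = value

def _resolve(key):
    # Prefer the app data bag, fall back to the legacy unit relation data
    return adata.get(key) or rdata.get(key)

print(_resolve("api_version"))   # '3'
print(_resolve("service_host"))  # '10.0.0.10' (only present in legacy data)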
@@ -860,9 +930,14 @@ class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']

def __init__(self, singlenode_mode=False,
address_types=ADDRESS_TYPES):
address_types=None,
exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
if address_types is None:
address_types = ADDRESS_TYPES[:]

self.address_types = address_types
self.singlenode_mode = singlenode_mode
self.exporter_stats_port = exporter_stats_port

def __call__(self):
if not os.path.isdir(HAPROXY_RUN_DIR):
@@ -957,10 +1032,20 @@ def __call__(self):
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
ctxt['stat_password'] = db.set('stat-password',
pwgen(32))
ctxt['stat_password'] = db.set('stat-password', pwgen(32))
db.flush()

# NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
# New bind will be created and a prometheus-exporter
# will be used for path /metrics. At the same time,
# prometheus-exporter avoids using auth.
haproxy_version = get_installed_version("haproxy")
if (haproxy_version and
haproxy_version.ver_str >= LooseVersion("2.0.0") and
is_relation_made("haproxy-exporter")):
ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
ctxt["stats_exporter_port"] = self.exporter_stats_port

for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
self.singlenode_mode):
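A rough sketch of the version gate described in the NOTE above. It is simplified: the real code compares the version object returned by get_installed_version and checks the haproxy-exporter relation via is_relation_made, while this sketch just takes a version string and a boolean:

from distutils.version import LooseVersion

def exporter_enabled(haproxy_ver_str, relation_made):
    """Illustrative only: enable the stats exporter bind for haproxy >= 2.0.0."""
    return (haproxy_ver_str is not None and
            LooseVersion(haproxy_ver_str) >= LooseVersion("2.0.0") and
            relation_made)

print(exporter_enabled("2.4.22", True))   # True
print(exporter_enabled("1.8.8", True))    # False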
4 changes: 3 additions & 1 deletion hooks/charmhelpers/contrib/openstack/deferred_events.py
@@ -127,7 +127,9 @@ def deferred_events():
"""
events = []
for defer_file in deferred_events_files():
events.append((defer_file, read_event_file(defer_file)))
event = read_event_file(defer_file)
if event.policy_requestor_name == hookenv.service_name():
events.append((defer_file, event))
return events


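In other words, deferred_events() now only returns events raised for the running application. A standalone sketch with invented stand-in objects (the real code reads ServiceEvent instances from the event files):

from collections import namedtuple

# Hypothetical stand-in for the events read from the deferred event files
Event = namedtuple("Event", ["policy_requestor_name", "action"])

def filter_events(events, this_app):
    """Illustrative only: keep events raised for this application."""
    return [e for e in events if e.policy_requestor_name == this_app]

all_events = [Event("cinder", "restart"), Event("nova-compute", "restart")]
print(filter_events(all_events, "cinder"))
# [Event(policy_requestor_name='cinder', action='restart')]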
29 changes: 29 additions & 0 deletions hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -25,6 +25,7 @@

import hashlib
import json
import os

import re

@@ -36,6 +37,7 @@
config,
status_set,
DEBUG,
application_name,
)

from charmhelpers.core.host import (
@@ -65,6 +67,7 @@

VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"


class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
relation_data['groups'] = {
key: ' '.join(vip_group)
}


def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
"""Load grafana dashboard json model and insert prometheus datasource.
:param prometheus_app_name: name of the 'prometheus' application that will
be used as datasource in grafana dashboard
:type prometheus_app_name: str
:param haproxy_dashboard: path to haproxy dashboard
:type haproxy_dashboard: str
:return: Grafana dashboard json model as a str.
:rtype: str
"""
from charmhelpers.contrib.templating import jinja

dashboard_template = os.path.basename(haproxy_dashboard)
dashboard_template_dir = os.path.dirname(haproxy_dashboard)
app_name = application_name()
datasource = "{} - Juju generated source".format(prometheus_app_name)
return jinja.render(dashboard_template,
{"datasource": datasource,
"app_name": app_name,
"prometheus_app_name": prometheus_app_name},
template_dir=dashboard_template_dir,
jinja_env_args={"variable_start_string": "<< ",
"variable_end_string": " >>"})
(The remaining changed files in this commit are not shown here.)
