diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fce6fd3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,41 @@ +# Compiled files +*.py[co] +*.a +*.o +*.so +__pycache__ + +# Sphinx +_build +doc/source/api/ + +# Packages/installer info +*.egg +*.egg-info +dist +build +eggs +parts +var +sdist +develop-eggs +.installed.cfg + +# Other +*.DS_Store +.stestr +.testrepository +.tox +.venv +.*.swp +.coverage +bandit.xml +cover +AUTHORS +ChangeLog +*.sqlite +**bin +**go-compose +*.log +coverage.* +venv/ diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000..f45c052 --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,196 @@ +--- +- project: + check: + jobs: + - openstack-tox-linters + - cstate-management-tox-py39 + - cstate-management-tox-pylint + - cstate-management-tox-flake8 + - cstate-management-tox-bandit + gate: + jobs: + - openstack-tox-linters + - cstate-management-tox-py39 + - cstate-management-tox-pylint + - cstate-management-tox-flake8 + - cstate-management-tox-bandit + post: + jobs: + - stx-app-cstate-management-upload-git-mirror + +# Application Jobs +- job: + name: cstate-management-tox-py39 + parent: tox-py39 + description: | + Run py39 test for cstate-management + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + files: + - cstate-management/* + vars: + tox_envlist: py39 + python_version: 3.9 + tox_extra_args: -c cstate-management/docker/cstate-management/tox.ini + +- job: + name: cstate-management-tox-pylint + parent: tox + description: | + Run pylint test for cstate-management + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + files: + - cstate-management/* + vars: + tox_envlist: pylint + tox_extra_args: -c cstate-management/docker/cstate-management/tox.ini + +- job: + name: cstate-management-tox-flake8 + parent: tox + description: | + Run flake8 test for cstate_management + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + - starlingx/root + files: + - cstate-management/* + vars: + tox_envlist: flake8 + tox_extra_args: -c cstate-management/docker/cstate-management/tox.ini + tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt' + +- job: + name: cstate-management-tox-metadata + parent: tox + description: | + Run metadata test for cstate_management + nodeset: debian-bullseye + required-projects: + - starlingx/config + - starlingx/fault + - starlingx/update + - starlingx/utilities + - starlingx/root + vars: + tox_envlist: metadata + tox_extra_args: -c cstate-management/docker/cstate-management/tox.ini + tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/starlingx/root/build-tools/requirements/debian/upper-constraints.txt' + +- job: + name: cstate-management-tox-bandit + parent: tox + description: | + Run bandit test for cstate-management + nodeset: debian-bullseye + files: + - cstate-management/* + vars: + tox_envlist: bandit + tox_extra_args: -c cstate-management/docker/cstate-management/tox.ini + +- job: + name: stx-app-cstate-management-upload-git-mirror + parent: upload-git-mirror + description: > + Mirrors opendev.org/starlingx/app-cstate to + github.com/starlingx/app-cstate + vars: + git_mirror_repository: starlingx/app-cstate + secrets: + - name: git_mirror_credentials + secret: stx-app-cstate-management-github-secret 
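+        # pass-to-parent exposes this secret to the parent upload-git-mirror job's playbooks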
+ pass-to-parent: true + +- secret: + name: stx-app-cstate-management-github-secret + data: + user: git + host: github.com + # yamllint disable-line rule:line-length + host_key: github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= + ssh_key: !encrypted/pkcs1-oaep + - dfihV1DxNAwL8LV1NWGyqaaQAgnTbYaDqQ68konrTjFuuexZkE//6Qlhpojpm3lrQW4MD + leP13umtdYuKmHj8BRg8s+725zN/YTqJjNTC75Nca87ujThGITsLlnJRLc6KoE6rzZQus + XX9ffQIVEpCdF56PCmtOjFmSLNgCH4PVe4wyZLyZc8Ltd3BnJ7Wem6V+cUnhzLtO0u197 + Y49jjQpD9qIkc6ws9Wvk2qbQX+XWHm0VtntJoE3v9P268QMmBUEvOuI4DNTjEb58jxNrR + xH4w00ehPTsmWwVIYtoAo4F32rGtXw0wUhO2QacE9EtMORLVZu2O+OXydSWtm/gCa7W1E + EGh4H0l/pYMTgV181fWr9CwMG9y6KLR2N0fR6nuIbu7FjY1YeN0pSGr7jGrLhKh45NJcu + dnvPBn0o7acSJ/zzYkU2z5xPgD2DeB4UwLa2uQdxdt/LjGEXzc4SMSnaGb1CdUxSHjqN6 + achudWZF3E43+YSvTkxeoMSkzNXJuh8fqGErYHP8hefX+/VKAlwWDeTOcJum+iqSPaHt7 + EMCGQ54DQAd7mQAQMo9WsgnqeCb5zuJSYcqMOcRTRw4zGCxIu5urauVz9X0WLCO0m2CUf + N8qlbr3/YYf4/O1Y8gEoBIvlRpEwG0kS8jrELmGFLFThoRITyPNWrJ3KokP0Mg= + - fm/uF6A7eMbub7qUlk+Kil9vHRF/qp8fRdIaYOsC3tABRC78sfUEhUB65PXLG7zyBWuSp + /tJceVwKMJiRujXiEBj24kTKiF8Vtdli8dRnN2C/Zq0NAWTJZQJ20hpmxeByQwTcoF7KI + q/lSGOD9ZHTY32b1Q+tFWN3auoPKCFCykpTgefsWjK4apYqRWFchwUInc2v0UBBmhb8VS + P8Z8eVE62C48H34EkhsprsUBIJSGxhh9tIJSO9LsF1RZ6fWLtBzvYPTqTjwGPnNlJzO4T + r7NWZpfcMcHd5YkHcriyncynzcz6yHlz2EXOmiCmPI0Ziv0OsDb8ZtnCYOvgmOwLM1cIC + S1CkMx7OIiX7vGxoRfV7hTHZHrSLvII4wbFqqGHpGqGJbL0c8+fgOiotRtYokZ4X5Yba7 + T2+b1WTtb8wP68BN3rHQ4Gr6cOMFnWsO39EckkzNl7ZHIgZrO6iJKCUGxnz8QVA/7ceBu + vNnc9lezn9NFrwb/JNeGiLKAM2hlePc1VFXjISw7ARuLbW6gb2vj8PxXpMI71k6ApJx5Y + cEXVBtNvuiiwiyvomwXOnxh8skS6FY2ST/Oe1GZe6LJLlCVkQ/sXBFVcWPJBqUXMLZfdI + 8D9F37INapNVwj3Fq8661BTTleEVoyUpQ5P7bR4pwH0qOapHQDw4fZJ3mI+VIg= + - NSn6lMdY84JuY2Cg19qdNJS//6BKBbYwCscQaOnq4nQkopNd2GRCgc/eS4/zs/O/YsPko + aM39DUd77EF2yIZ3Hw9JQ/VNCUId4TUm7Wzg02JDlWC8YdTCoDZvItIx9qUU9mEJBsNI/ + z+BcSbt9LPvvBycZcqSmS/FVsy44YGGANw5N6n4GRxAQDt0JVsJuTkM92gBrjTySy42+Z + uC0JcWc2GpMNYffTDTMjHvkuMJvpDJkLxWNyqdjFswDgfSG3o2TDO1li+u6NZpWa5SP1N + szWbCUTxa+UFbiaM3Js1nRWY0gZUrFRqG/WqcLibNRJXO80mvBHdR22YMMbiElVXrvhfB + qdHsljKoF2QZSRuROLiDtySaHm98U2vCNNy3KECA7kJcK+at4X6bs7sDz1EbvvSVw4lgd + EJJ9vwz8CRPYeJqJG+9jUNpMkXzUnyHGo4nH3ebmvOTz4B0LBa0QOm0BoinVG6Y2+xtwY + y1mUp+r3G3BvZ8L8LYR+t+84WeZuqM1ya0rzg7PjzX3drK36aR8mpHtsi3p6AINu62u0e + Aio7TraY2vxAfGG4Ke04327shFSohWVixC1culGtJz2lfLFXRyxvMaEK62FnffPzkJCdQ + JGhO7ynyY6veveRl8cRYYY+HFJVzj+LwFR4Ghmhl4w+HWxJOg6Cc+xL6+eVpic= + - C1tmuvayO6FcpWfCNGiDbUKouAwMXhsuu1dXAitdO3ezafXhAjAzVom3lRBNvEkallgMG + wPFs/LK6GIrVShUGj1cjzWzFiIvVCA29VEyHMNFMB5OPQh91Y7Tck6HorTN3ytCoL1BEi + /3BlYNx8R40u4XzAKwfeyv8bQTLRUxU0B1cB8XV5gypGDMFje1dbZL1Kep8TfrQsCGUlS + rCWLO+9tPH+4Ay+p48oSDJ7Zkf4Bg9NJBzyk7eaM/d/pLkC9ANw6KEL0ObKZdR+JnNE0j + xE/rcOAk8gg7bCJe6EQ2odpVRBx7PCmI4y+Xit3c7MJAaVaTH4BiYLkV4KLjtZtloLqvH + xXGTuQmllzjywqn7ChDPwQc0FNo/twDNp3qe/9pLQE4uJXOWxqCbeOpP+gNFMiyaKaQRn + puucS2v2MHuIq9Sp9R6cJ5xdgsHROJjak52K76K6j/ien3s7/GkcmMxhfKVzndtPCZxaL + gkjlvFCSwcGU5uXaojQhI9+u0i+vb/emCLV0v42Iy3VDsq7A6MCDwpZ2o0C+R1l51gx7a + 
mi0x7iozJTUq5uldcPAWY8oB6Bqb5hJbZ2VRnW8uX0/wM4gJeTRWOCleLvDbb/IGX2jXM + zEc9+tvQ5Ya/mPytU+9L8fhBaZk8Lbc+cixxmhmkbFJTJFqms7F5WXbuEZKLLc= + - MfAiBheNKY4BDqLcUtZl5gWR07+UETexQtjI9JP11+V2zYr1ni0yASLcxUF1/WiYMfRzd + oH2fS+fKe6yFwGWQmqmMRiFMRkJnuGLpQGdY68/o9P2G+XmjGyP5SCbmw9zOUrwOdyJAP + 1+GocdyA64l7l0QyAEOQH1/BGL8mx6rziC7NolkT6NXyY4CHYIZt0ayHsgH7+6OS3DryZ + 3MEdeQ3TUwHDjRCG2S26H4fRHgXnYlSYuy6+GoPBYqL/D5Lw7DrnF7Ds4sRuGKvDXKXSK + eEAwMS+C+UYmxDTCg58g28T4NNklEPlOOv+qFuzJdS5dOixr9Fm6Ckp/58ZvJBkfu0DOc + qgeIOgwp4aMO7CZAmUFcHPNSsgb4NXmbQi3FA1JJDXZEkCfXdmNe/GvalGV0rJGMDnCyy + luvpv3FbecEBuNxmMpGoGBCfU0iJz0cjcwDWhhR63GcSgBuBZ7KGlvU1HzAEZ/ZwMyYNl + gI/2k7q/4qPjC9EwwQG31F2wZ1E9EmyarwXsl+4dzhdhFNk8XDvnNhEWFs2WJvu6ktver + MRfDnhcby1kzvC5McKQfvbEkzrUqpiOnuJufz9eThu4ZAZrsZfs6T2J1TOUYvH8K7DhU2 + 7spwLYCho6V7xe0bRyBPpyUwdGuyH1wkUrKC9VMt+Oq+Ed2YetaPaOWXcV46Zg= + - f2Ev0+e05Iag6Qhk+EFUGQ69KLHC74rkb5lhqBPu0nckuJjLaBR0colziLejZVPDHajpo + 49ZaUn9/PAtHNzhmJgJwV+yE8Uzy5dgEDeg/LNAcu/QoGNrFPVDvqbuL0SDz+UNnPb30F + nXUB8O3XaXSRyXMikpUqrB1HbGLlV3C0UoFQ9lPOqik93z8fyR+q+o+ruarrJhpwkvs3c + rR0H4NtLF1JaWv0xYVFtIn+bWQQD9kITKxJY+Kyz8bKgRwuPOd99O4m3/5ZySl0ZIjSio + Y9vJHv7EJLdjpu8FOHd1UiV9HJaRS+YxNIpHmvdqVvDufHPchpYmI0NiS+XI4lFePClDQ + MZ0V4bsMhmmDt9UnLf6Htgm/lQsvT5JtI/uaiMBtjU87Cy8L9Mi9uKcLUlVIOKpjxv4On + FkOFHlPQp4dCnntEaQ3bH63kiVVrHcwhomm9q7N2uTdVYxL4b1AuwQus28hQz8bMHlrKh + wWxEgs3gO+yPRrFosdrbkc71ULnMaNS3bBPz7n9mJfRMGGBY71dH/16OpYRoIl6r6gua6 + lwRBrLdmK5bqvh2iRgiTaZC5TjJeUOJBwF+h3IdZ2enpZ1110WelYOjvK/Vf+zXWu7Uhk + OdPMOwr3Mf72zO6YIKeEP+9r23b/aVyQ4xVIIHAgeBvJdUv2DoOilMZdV8nQfU= + - cxCyhnIHQemd9ZUaeACUwcScYDKuPYMATGF0sn/WyiAsH8fyXoTv604LgDTM4ZsMwK/z9 + v6/VV2I1X3oMiL3JvB02sL8PPOibMMqgchJmKwdU2FoYTpZxELJfMF9n7ZU3Xd6gSGXZa + xidA4O1vI9bDbbZNbb3cpBNzG44v6Rdr1/0Zpq4fy7Muf0/ypmZeot5wcFOoH0n0TVY8R + c65i3gDVJg9j+GxSOZirGbYxFq9S0o/bF+cVYW1yZPZPBwIoQ5ctJY45Bkbute0e2P+mL + MKw0FMrY0Lk0nwPu8+cCoe6+OW/cWTsU9hgvDGCntyOWoMwaWP2kO0S2W6tUuGw/2vlk3 + Hh6fg9sfVF3FznqIKGglgmI+VyDbqXI4ULet1pWHk8YIVfo2+Cb3hgxZmxcJKpHTqvvMw + VJlEpM+FUchJz5AKNf/UYs5o7SSKgBwpaQSKvjdjoeiGSUdJR3ahmuvN3BIQwgGFthYzs + TEUIrffT9PRVbGlWZ0owIEiVETuKaby47XI9cr6nwU3y3sSKuQq5LwLryGD2raYt4UYVA + 1qlG+VMaGzMLnkR022W3nXZJlJtm1MkFzXwGGFE6uTGg3PPKywzjuP36Y2Ews47bT1UgX + Iw8KSvs3NudtAWzz93UkhAn/H9vJkDc6Pf1VwqSZPlqm+yY5jNn5vGWPVw68yY= + diff --git a/bindep.txt b/bindep.txt new file mode 100644 index 0000000..3ffe69f --- /dev/null +++ b/bindep.txt @@ -0,0 +1,10 @@ +# This is a cross-platform list tracking distribution packages needed for install and tests; +# see https://docs.openstack.org/infra/bindep/ for additional information. 
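+# The -dev/-devel packages below provide build headers typically needed by
+# C-extension wheels such as cffi, lxml and python-ldap.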
+ +libffi-dev [platform:dpkg] +libldap2-dev [platform:dpkg] +libxml2-dev [platform:dpkg] +libxslt1-dev [platform:dpkg] +libsasl2-dev [platform:dpkg] +libffi-devel [platform:rpm] +python3-all-dev [platform:dpkg] diff --git a/cstate-management/debian/.gitkeep b/cstate-management/debian/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/cstate-management/docker/cstate-management/.stestr.conf b/cstate-management/docker/cstate-management/.stestr.conf new file mode 100644 index 0000000..8c81b52 --- /dev/null +++ b/cstate-management/docker/cstate-management/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./cstatemanagement/tests +top_dir=./cstatemanagement diff --git a/cstate-management/docker/cstate-management/MANIFEST.in b/cstate-management/docker/cstate-management/MANIFEST.in new file mode 100644 index 0000000..c922f11 --- /dev/null +++ b/cstate-management/docker/cstate-management/MANIFEST.in @@ -0,0 +1 @@ +recursive-include public * diff --git a/cstate-management/docker/cstate-management/README b/cstate-management/docker/cstate-management/README new file mode 100644 index 0000000..1de936c --- /dev/null +++ b/cstate-management/docker/cstate-management/README @@ -0,0 +1 @@ +# Source code dir for cstate REST API diff --git a/cstate-management/docker/cstate-management/config.py b/cstate-management/docker/cstate-management/config.py new file mode 100644 index 0000000..2a545a6 --- /dev/null +++ b/cstate-management/docker/cstate-management/config.py @@ -0,0 +1,61 @@ +# Server Specific Configurations +server = { + 'port': '32000', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +app = { + 'root': 'cstatemanagement.controllers.root.RootController', + 'modules': ['cstatemanagement'], + 'static_root': '%(confdir)s/public', + 'template_path': '%(confdir)s/cstatemanagement/templates', + 'debug': False, + 'errors': { + 404: '/error/404', + '__force_dict__': True + } +} + +logging = { + 'root': {'level': 'INFO', 'handlers': ['logfile', 'console']}, + 'loggers': { + 'cstatemanagement': { + 'level': 'DEBUG', + 'handlers': ['logfile', 'console'], + 'propagate': False + }, + 'pecan': { + 'level': 'DEBUG', + 'handlers': ['logfile', 'console'], + 'propagate': False + }, + 'py.warnings': {'handlers': ['logfile', 'console']}, + '__force_dict__': True + }, + 'handlers': { + 'logfile': { + 'level': 'INFO', + 'class': 'logging.FileHandler', + 'filename': '/var/log/cstate.log', + 'formatter': 'simple' + }, + 'console': { + 'level': 'INFO', + 'class': 'logging.StreamHandler', + 'formatter': 'simple' + } + }, + 'formatters': { + 'simple': { + 'format': ('%(asctime)s %(levelname)-4.4s [%(name)s]' + '[%(threadName)s] %(message)s') + }, + 'color': { + '()': 'pecan.log.ColorFormatter', + 'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]' + '[%(threadName)s] %(message)s'), + '__force_dict__': True + } + } +} diff --git a/cstate-management/docker/cstate-management/cstatemanagement/__init__.py b/cstate-management/docker/cstate-management/cstatemanagement/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cstate-management/docker/cstate-management/cstatemanagement/app.py b/cstate-management/docker/cstate-management/cstatemanagement/app.py new file mode 100644 index 0000000..5975de0 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/app.py @@ -0,0 +1,14 @@ +from cstatemanagement import model +from pecan import make_app + + +def setup_app(config): + + model.init_model() + app_conf = dict(config.app) + + return make_app( + 
app_conf.pop('root'), + logging=getattr(config, 'logging', {}), + **app_conf + ) diff --git a/cstate-management/docker/cstate-management/cstatemanagement/common/__init__.py b/cstate-management/docker/cstate-management/cstatemanagement/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cstate-management/docker/cstate-management/cstatemanagement/common/cpusets.py b/cstate-management/docker/cstate-management/cstatemanagement/common/cpusets.py new file mode 100644 index 0000000..519a7fd --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/common/cpusets.py @@ -0,0 +1,345 @@ +# +# Copyright (c) 2024 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import itertools as it +import json +import logging +import logging.handlers +import os +import re +import socket +import subprocess + +# Constants +statefile = '/var/lib/kubelet/cpu_manager_state' +platformconf = '/etc/platform/platform.conf' +reservedfile = '/etc/platform/worker_reserved.conf' +cpu_isolated = '/sys/devices/system/cpu/isolated' +cpu_online = '/sys/devices/system/cpu/online' +cpuset_cpus = '/sys/fs/cgroup/cpuset/k8s-infra/cpuset.cpus' +node_path = '/sys/devices/system/node' + +LOG = logging.getLogger(__name__) + + +def format_range_set(items): + """Generate a pretty-printed value of ranges from a set of integers. + + e.g., given a set or list of integers, format as '3-6,8-9,12-17' + """ + ranges = [] + for _, iterable in it.groupby(enumerate(sorted(items)), + lambda x: x[1] - x[0]): + rng = list(iterable) + if len(rng) == 1: + s = str(rng[0][1]) + else: + s = f"{str(rng[0][1])}-{str(rng[-1][1])}" + ranges.append(s) + return ','.join(ranges) + + +def range_to_list(csv_range=None): + """Convert a string of comma separate ranges into an expanded list. + + e.g., '1-3,8-9,15' is converted to [1,2,3,8,9,15] + """ + if not csv_range: + return [] + splited_range = [r for r in csv_range.split(',')] + result = [] + int_range_list = [list(map(int, b.split('-'))) for b in splited_range] + for m in int_range_list: + result.extend([t for t in range(m[0], m[-1] + 1)]) + return result + + +def cpuset_from_cpulist_file(filename): + """Read cpulist file and convert to set of integers. + + File containing comma separated ranges is converted to an expanded set + of integers. e.g., '1-3,8-9,15' is converted to set([1,2,3,8,9,15]) + """ + cpuset_str = None + try: + with open(filename, 'r') as f: + cpuset_str = f.readline().strip() + except Exception as e: + LOG.error('Cannot parse file:%s, error=%s', filename, e) + cpuset = set(range_to_list(csv_range=cpuset_str)) + return cpuset + + +def get_isolated_cpuset(): + """Get isolated cpuset from sys devices. + + Reads sys devices isolated cpu file containing comma separated ranges + and convert to an expanded set of integers. + """ + filename = cpu_isolated + cpuset = cpuset_from_cpulist_file(filename) + return cpuset + + +def get_online_cpuset(): + """Get online cpuset from sys devices. + + Reads sys devices online cpu file containing comma separated ranges + and convert to an expanded set of integers. + """ + filename = cpu_online + cpuset = cpuset_from_cpulist_file(filename) + return cpuset + + +def get_k8sinfra_cpuset(): + """Get cgroup k8s-infra cpuset from sys fs cgroup. + + Reads sys fs cgroup k8s-infra cpuset.cpus file containing comma + separated ranges and convert to an expanded set of integers. 
+ """ + filename = cpuset_cpus + cpuset = cpuset_from_cpulist_file(filename) + return cpuset + + +def get_node_cpusets(): + """Get cpusets per numa node from sys devices. + + Returns a list of nodes with the set of integers per numa node. + """ + nodepath = node_path + re_node = re.compile(r'^node(\d+)$') + nodes = {} + if os.path.isdir(nodepath): + for d in os.listdir(nodepath): + match = re_node.search(d) + if match: + node = int(match.group(1)) + filename = nodepath + '/node' + str(node) + '/cpulist' + cpuset = set() + if os.path.isfile(filename): + cpuset = cpuset_from_cpulist_file(filename) + nodes[node] = cpuset + return nodes + + +def gather_containers_cpuset_info(namespace, pod_name): + """Gather cpuset information for the containers of a `pod`. + + Get the cpuset per container and the mapping to numa nodes. + Get the aggregate cpuset usage per system-level groupings, + per-numa node. + """ + hostname = socket.gethostname() + + # Read current host cpusets from sysfs + node_cpusets = get_node_cpusets() + isolated_cpuset = get_isolated_cpuset() + online_cpuset = get_online_cpuset() + k8sinfra_cpuset = get_k8sinfra_cpuset() + LOG.debug('node_cpusets = %r', node_cpusets) + LOG.debug('isolated_cpuset = %r', isolated_cpuset) + LOG.debug('online_cpuset = %r', online_cpuset) + LOG.debug('k8sinfra_cpuset = %r', k8sinfra_cpuset) + + # Obtain platform node configuration + re_keyval = re.compile(r'^(\S+)\s*=\s*(\S+)') + platconf = {} + try: + with open(platformconf, 'r') as f: + for line in f: + m = re.search(re_keyval, line) + if m: + key = m.group(1) + value = m.group(2) + platconf[key] = value + except Exception as e: + LOG.error('Could not parse: %s, error: %s.', platformconf, e) + return 1 + nodetype = platconf.get('nodetype') + subfunction = platconf.get('subfunction') + system_type = platconf.get('system_type') + + # Obtain platform cpuset for worker node, as configured by sysinv/puppet. + re_platform = re.compile(r'^PLATFORM_CPU_LIST\s*=\s*\"(\S+)\"') + if 'worker' in subfunction: + cpulist_str = None + try: + with open(reservedfile, 'r') as f: + for line in f: + m = re.search(re_platform, line) + if m: + cpulist_str = m.group(1) + except Exception as e: + LOG.error('Could not parse: %s, error: %s.', reservedfile, e) + platform_cpuset = set(range_to_list(csv_range=cpulist_str)) + else: + platform_cpuset = online_cpuset - isolated_cpuset + LOG.debug('platform_cpuset = %r', platform_cpuset) + + # Read cpusets from kubelet cpumanager JSON state file dictionary + state = {} + try: + with open(statefile, 'r') as f: + state = json.load(f) + except Exception as e: + LOG.error('Could not load: %s, error: %s.', statefile, e) + return 1 + LOG.debug('cpu-manager state = %r', state) + + # Obtain cpu-manager policy + policy = str(state['policyName']) + + # Print tool header line + LOG.info('host:%s, system_type=%s, nodetype=%s, subfunction=%s, ' + 'cpumanager_policy=%s', + hostname, system_type, nodetype, subfunction, policy) + + # Determine default cpu-manager cpuset + if 'defaultCpuSet' not in state: + LOG.error('Missing defaultCpuSet in %s', statefile) + return 1 + default_cpuranges = str(state['defaultCpuSet']) + default_cpuset = set(range_to_list(csv_range=default_cpuranges)) + LOG.debug('default_cpuset = %r', default_cpuset) + + # Determine aggregate of cpumanager static allocations, + # i.e., this contains: platform, guaranteed, isolated . 
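+    # In the kubelet state file, 'entries' nests pod UID -> container name ->
+    # cpuset string (e.g. "2-3,10"); the loop below unions every cpuset to
+    # collect all CPUs that cpu-manager has statically allocated.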
+ static_cpuset = set() + if 'entries' in state: + for _, dcpus in state['entries'].items(): + for cpus in [str(i) for i in dcpus.values()]: + cpulist = set(range_to_list(csv_range=cpus)) + static_cpuset.update(cpulist) + + # Determine guaranteed cpuset + guaranteed_cpuset = static_cpuset - platform_cpuset - isolated_cpuset + LOG.debug('guaranteed_cpuset = %r', guaranteed_cpuset) + + # Determine isolated cpuset + # isolated_used_cpuset = static_cpuset.intersection(isolated_cpuset) + + # Commands to gather the targeted pod's resources + cmd_filter_pod = [ + 'crictl', 'pods', '--name', pod_name, '--namespace', namespace, '-q' + ] + cmd_filter_container_info = [ + 'xargs', '-I{}', 'crictl', 'ps', '-pod', '{}', '--output=json' + ] + + try: + filter_pod_output = subprocess.Popen( + cmd_filter_pod, + stdout=subprocess.PIPE + ) + output = subprocess.check_output( + cmd_filter_container_info, + stderr=subprocess.STDOUT, + stdin=filter_pod_output.stdout + ) + + LOG.debug( + 'command: %s | %s\n%s', + ' '.join(cmd_filter_pod), + ' '.join(cmd_filter_container_info), + output + ) + + # Gather data for each container + J = json.loads(output) + except Exception as e: + LOG.error('Could not list containers, error=%s', e) + return [] + + gathered_containers = [] + containers = {} + for cont in J['containers']: + containers[cont['id']] = { + 'name': cont['metadata']['name'], + 'pod.name': cont['labels']['io.kubernetes.pod.name'], + 'cont.name': cont['labels']['io.kubernetes.container.name'], + 'namespace': cont['labels']['io.kubernetes.pod.namespace'], + 'state': cont['state'], + } + for cid, C in sorted(containers.items(), + key=lambda kv: (kv[1]['namespace'], + kv[1]['name'], + kv[1]['cont.name'])): + cid_short = cid[0:13] + pname = C['pod.name'] + cname = C['cont.name'] + namespace = C['namespace'] + cstate = C['state'] + + # Now that we have the container ids, get more detailed resource info + cmd = ['crictl', 'inspect', '--output=json', cid] + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + LOG.debug('command: %s\n%s', ' '.join(cmd), output) + except subprocess.CalledProcessError as e: + LOG.error('Could not get container %s, error=%s', cid, e) + return 1 + + inspect = json.loads(output) + linux = inspect['info']['runtimeSpec']['linux'] + cgroupsPath = linux['cgroupsPath'] + resources = linux['resources'] + cpu = resources['cpu'] + shares = cpu.get('shares', '-') + cpus = cpu.get('cpus') + cpuset = set(range_to_list(csv_range=cpus)) + if not cpuset: + cpuset = k8sinfra_cpuset + + # Determine pod QoS + if 'besteffort' in cgroupsPath: + QoS = 'besteffort' + elif 'burstable' in cgroupsPath: + QoS = 'burstable' + else: + QoS = 'guaranteed' + + # Determine cpuset group mapping + if cpus is None: + group = 'k8s-infra' + else: + if cpuset.issubset(platform_cpuset): + group = 'platform' + elif cpuset.issubset(isolated_cpuset): + group = 'isolated' + elif cpuset.issubset(guaranteed_cpuset): + group = 'guaranteed' + elif cpuset.issubset(default_cpuset): + group = 'default' + else: + group = 'unknown' + + # Determine per-numa node mapping of the container cpuset + per_node = {} + for node, node_cpuset in sorted(node_cpusets.items(), + key=lambda kv: kv[0]): + # calculate subset of cpus for the given numa node + n_cpuset = cpuset.intersection(node_cpuset) + if n_cpuset: + cpuranges = format_range_set(n_cpuset) + per_node[f"node{node}"] = cpuranges + cpu_set = set(range_to_list(cpus)) + gathered_containers.append( + {"namespace": namespace, + "pod_name": pname, + "container_name": cname, + 
"container_id": cid, + "container_id_short": cid_short, + "container_state": cstate, + "QoS": QoS, + "shares": shares, + "group": group, + "cpus": per_node, + "cpu_set": cpu_set + }) + + return gathered_containers diff --git a/cstate-management/docker/cstate-management/cstatemanagement/common/power_tool.py b/cstate-management/docker/cstate-management/cstatemanagement/common/power_tool.py new file mode 100644 index 0000000..fc7e9a6 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/common/power_tool.py @@ -0,0 +1,175 @@ +# +# Copyright (c) 2024 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import logging +import os + +LOG = logging.getLogger(__name__) + + +def getfileval(stateFileName): + with open(stateFileName, 'r') as state_file: + return state_file.readline().strip("\n") + + +def get_current_driver(): + """Get current cpu idle driver.""" + current_driver = 'acpi_idle' + current_driver_path = "/sys/devices/system/cpu/cpuidle/current_driver" + try: + current_driver_file = open(current_driver_path, 'r') + current_driver = current_driver_file.readline().strip("\n") + except OSError as ex: + LOG.error(f"Exception:{type(ex)}@{str(ex)}") + return current_driver + + +def get_available_cstates(cpu=0): + """Get the list of available cstate of core.""" + + stateList = [] + cpuidle_path = f"/sys/devices/system/cpu/cpu{str(cpu)}/cpuidle/" + try: + states = os.listdir(cpuidle_path) + except OSError as ex: + states = '' + LOG.error(f"Exception:{type(ex)}@{str(ex)}") + for state in states: + stateFileName = f"/sys/devices/system/cpu/cpu{str(cpu)}" \ + f"/cpuidle/{state}/name" + stateFile = open(stateFileName, 'r') + statename = stateFile.readline().strip("\n") + stateList.append(statename) + stateFile.close() + return stateList + + +def get_cstates(cpu=0): + """Get the list of available cstate with latency of core.""" + + stateList = [] + # idle_driver_type = get_current_driver() + cpuidle_path = f"/sys/devices/system/cpu/cpu{str(cpu)}/cpuidle/" + try: + states = os.listdir(cpuidle_path) + except OSError as ex: + states = '' + LOG.error(f"Exception:{type(ex)}@{str(ex)}") + for state in states: + stateFileName = f'/sys/devices/system/cpu/cpu%s/cpuidle/{state}/name' \ + % str(cpu) + stateLatency = f'/sys/devices/system/cpu/cpu%s/cpuidle/{state}/' \ + 'latency' % str(cpu) + stateFile = open(stateFileName, 'r') + latencyFile = open(stateLatency, 'r') + statename = stateFile.readline().strip("\n") + statelat = latencyFile.readline().strip("\n") + stateobj = { + 'stateName': statename, + 'stateLatency': statelat, + 'state': state + } + stateList.append(stateobj) + stateFile.close() + latencyFile.close() + return stateList + + +def set_cstate(cpurange, disable, cstate): + """Get list of cstate dirs to iterate through to find the cstate name + + parameters: + - cpurange: [0,1,2,3,4,10] + - disable: 0 or 1 + - cstate: POLL, C1 or other + """ + + cstates = os.listdir("/sys/devices/system/cpu/cpu0/cpuidle") + + for core in cpurange: + for y in cstates: + name = getfileval( + "/sys/devices/system/cpu/cpu0/cpuidle/" + y + "/name") + if (name == cstate): + stateName = "/sys/devices/system/cpu/cpu" + \ + str(core) + "/cpuidle/" + str(y) + "/disable" + try: + stateFile = open(stateName, 'w') + except OSError: + LOG.error( + "Could not open '" + str(stateName) + "', skipping." 
+ ) + continue + LOG.info("Writing '" + str(disable) + "' to " + stateName) + stateFile.write(str(disable)) + stateFile.close() + + +def get_cstates_configured(cpurange): + """Get current c-state settings for specific cpurange. + + parameter: + - cpurange: [0,1,2,3,4] + + Return cpu_configured_cstates: + {'0': {'C1': '0', 'C2': '1', 'POLL': '0'}, + '1': {'C1': '0', 'C2': '1', 'POLL': '0'}, + '2': {'C1': '0', 'C2': '1', 'POLL': '0'}, + '3': {'C1': '0', 'C2': '1', 'POLL': '0'}, + '4': {'C1': '0', 'C2': '0', 'POLL': '0'} + } + """ + cpu_configured_cstates = dict() + + for cpu in cpurange: + cpustates = os.listdir( + f"/sys/devices/system/cpu/cpu{str(cpu)}/cpuidle/" + ) + configured_cstates = dict() + for state in cpustates: + stateBasePath = f'/sys/devices/system/cpu/cpu%s/cpuidle/{state}/' \ + % str(cpu) + stateFileName = stateBasePath + 'name' + stateDisableFileName = stateBasePath + 'disable' + + stateNameFile = open(stateFileName, 'r') + statename = stateNameFile.readline().strip("\n") + stateNameFile.close() + + stateDisableFile = open(stateDisableFileName, 'r') + stateDisable = stateDisableFile.readline().strip("\n") + stateDisableFile.close() + + configured_cstates[statename] = '1' if stateDisable == '0' else '0' + + cpu_configured_cstates[str(cpu)] = configured_cstates + + return cpu_configured_cstates + + +def get_max_cstate(cpurange): + cstates_configured = {} + cstates = get_cstates_configured(cpurange) + # cstates object be like: + # { + # "5": {"C1": "1", "C2": "1", "POLL": "1"}, + # "6": {"C1": "1", "C2": "1", "POLL": "1"} + # } + for core in cstates: + core_cstates = cstates.get(core) + # core_cstates: {"C1": "1", "C2": "1", "POLL": "1"} + filtered_core_cstates = { + key: value for key, value in core_cstates.items() + if value == '1' and key != 'POLL' + } + # drop POLL cstate + sorted_filtered_core_cstates = sorted(filtered_core_cstates) + if not sorted_filtered_core_cstates: + sorted_filtered_core_cstates = ['POLL'] + max_enabled_cstate = sorted_filtered_core_cstates[-1] + cstates_configured[core] = max_enabled_cstate + + return cstates_configured diff --git a/cstate-management/docker/cstate-management/cstatemanagement/controllers/__init__.py b/cstate-management/docker/cstate-management/cstatemanagement/controllers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cstate-management/docker/cstate-management/cstatemanagement/controllers/root.py b/cstate-management/docker/cstate-management/cstatemanagement/controllers/root.py new file mode 100644 index 0000000..7125af8 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/controllers/root.py @@ -0,0 +1,277 @@ +# +# Copyright (c) 2024 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +from cstatemanagement.common import cpusets +from cstatemanagement.common import power_tool as power_cli +import logging +import pecan +from pecan import request +from pecan import expose, rest, abort +from webob import exc as webobexc +from webob.exc import status_map +from wsme import types as wtypes +from wsmeext.pecan import wsexpose + + +LOG = logging.getLogger(__name__) + +AVAILABLE_CSTATES = power_cli.get_available_cstates() +AVAILABLE_CSTATES.sort() +SORTED_AVAILABLE_CSTATES = ['POLL'] +for cstate in AVAILABLE_CSTATES: + if cstate != 'POLL': + SORTED_AVAILABLE_CSTATES.append(cstate) +LOG.info(f"Current available cstates: {str(SORTED_AVAILABLE_CSTATES)}") + + +class SetController(rest.RestController): + + def _validate_cstate(self, cstate): + cstates = power_cli.get_available_cstates() + if cstate not in cstates: + exp = f"Request c-state is not one of available c-states: " \ + f"{','.join(SORTED_AVAILABLE_CSTATES)}" + raise webobexc.HTTPBadRequest(explanation=exp) + + def _validate_cores(self, cores, namespace, pod_name): + try: + pod = cpusets.gather_containers_cpuset_info(namespace, pod_name) + except Exception as ex: + LOG.error(f"Exception:{type(ex)}@{str(ex)}") + raise webobexc.HTTPServerError( + explanation='Unexpected internal error' + ) + + if not pod: + exp = f"Request pod: {pod_name} not found in " \ + f"namespace: {namespace}" + raise webobexc.HTTPNotFound(explanation=exp) + + cpu_set = set() + for container in pod: + cpu_set = cpu_set.union(container['cpu_set']) + + if cpu_set.intersection(set(cores)) != set(cores): + exp = f"The Requested cores: {str(cores)} are not used in " \ + f"isolation by namespace: {namespace}, pod: {pod_name}" + raise webobexc.HTTPBadRequest(explanation=exp) + + @expose('json') + def post(self): + params = request.json + namespace = params.get('namespace') + pod_name = params.get('pod_name') + cores = params.get('cores') + max_cstate = params.get('max_cstate') + if not cores or not max_cstate: + LOG.warning(f"Invalid Request data: {str(params)}") + raise webobexc.HTTPBadRequest( + explanation='Missing request required parameters: ' + 'max_cstate or cores.' + ) + cores_arr = cpusets.range_to_list(cores) + self._validate_cstate(max_cstate) + + if namespace and pod_name: + self._validate_cores(cores_arr, namespace, pod_name) + LOG.info( + f"Ready to configure the c-state: {max_cstate}" + f"of the cores: {str(cores_arr)} belonging to pod: " + f"{pod_name}, namespace: {namespace}." 
+ ) + cstate_current = { + 'namespace': namespace, + 'pod_name': pod_name + } + else: + LOG.info( + f"Ready to configure the c-state: {max_cstate} " + f"of the cores: {str(cores_arr)}" + ) + cstate_current = {} + + try: + if max_cstate == 'POLL': + for cstate in SORTED_AVAILABLE_CSTATES[1:]: + power_cli.set_cstate(cores_arr, 1, cstate) + else: + for cstate in SORTED_AVAILABLE_CSTATES[1:]: + if cstate <= max_cstate: + power_cli.set_cstate(cores_arr, 0, cstate) + elif cstate > max_cstate: + power_cli.set_cstate(cores_arr, 1, cstate) + except Exception: + raise webobexc.HTTPServerError( + explanation='Unexpected internal error' + ) + cstates_set = power_cli.get_max_cstate(cores_arr) + cstate_current['cstates_set'] = cstates_set + return cstate_current + + +class AvailableController(rest.RestController): + + @expose('json') + def get(self): + LOG.info("Get all available cstates.") + cpustates = power_cli.get_available_cstates() + return {'cstates': cpustates} + + +class DetailController(rest.RestController): + + @expose('json') + def get(self): + LOG.info("Get all cstates detail.") + details = power_cli.get_cstates() + return {'cstates_detail': details} + + +class ConfiguredController(rest.RestController): + + def _parse_json_container(self, containers=[]): + for container in containers: + for key, value in container.items(): + if isinstance(value, set): + container[key] = list(value) + + @expose('json') + def get(self): + namespace = request.params.get("namespace") + pod_name = request.params.get("pod_name") + cores = request.params.get("cores") + + idle_driver_type = power_cli.get_current_driver() + + if cores and not namespace and not pod_name: + LOG.info(f"Request to fetch cstate for cores: {cores}") + cpu_set = cpusets.range_to_list(cores) + return { + 'cstate_current': { + 'cstates_configured': power_cli.get_max_cstate(cpu_set) + }, + 'idle_driver_type': idle_driver_type + } + + LOG.info( + f"Request to fetch cstate, pod: {pod_name} " + f"with namespace: {namespace}" + ) + cstate_current = {'namespace': namespace, + 'pod_name': pod_name, + 'cstates_available': SORTED_AVAILABLE_CSTATES} + cstates_configured = {} + if not namespace or not pod_name: + LOG.warning(f'Invalid Request data:{str(request.params)}') + raise webobexc.HTTPBadRequest( + explanation='Missing request params: namespace, pod_name.' + ) + + try: + all_containers = cpusets.gather_containers_cpuset_info( + namespace, pod_name + ) + pod_containers = [] + for container in all_containers: + if container.get('group') not in ['isolated', 'guaranteed']: + LOG.debug( + f"Container: {container.get('container_name')} " + f"with Namespace: {namespace} and Pod: {pod_name} " + 'does not use cpu isolated or guaranteed' + ) + continue + if container.get('namespace') == namespace \ + and container.get('pod_name') == pod_name: + pod_containers.append(container) + cpurange = container.get('cpu_set') + if not cpurange: + LOG.warning( + f"No isolated cores found for Container: " + f"{container.get('container_name')} with " + f" Namespace: {namespace} and Pod: {pod_name} ") + continue + + cstates_configured.update( + power_cli.get_max_cstate(cpurange) + ) + cstate_current['cstates_configured'] = cstates_configured + except Exception as ex: + LOG.error(f"Exception:{type(ex)}@{str(ex)}") + raise webobexc.HTTPServerError( + explanation='Unexpected internal error' + ) + if not pod_containers: + exp = f"No container associated with the Namespace use cpu " \ + f"isolated: {namespace}, Pod: {pod_name} was found." 
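+            # Reached when the pod was not found or none of its containers
+            # belong to the 'isolated' or 'guaranteed' cpuset groups.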
+ raise webobexc.HTTPNotFound(explanation=exp) + return {'cstate_current': cstate_current, + 'idle_driver_type': idle_driver_type} + + +class CStateController(rest.RestController): + + @expose("json") + def _lookup(self, primary_key, *remainder): + if primary_key: + if 'current' == primary_key.lower(): + return ConfiguredController(), remainder + if 'set' == primary_key.lower(): + return SetController(), remainder + if 'detail' == primary_key.lower(): + return DetailController(), remainder + abort(404) + + @wsexpose(wtypes.text) + def get(self): + return 'cstateapi' + + def index(self): + return dict() + + +class CStateControllerV1(rest.RestController): + + @pecan.expose('json') + def index(self): + return dict() + + @expose('json') + def error(self, status): + try: + status = int(status) + except ValueError: + status = 500 + message = getattr(status_map.get(status), 'explanation', '') + return dict(status=status, message=message) + + @expose("json") + def _lookup(self, primary_key, *remainder): + if primary_key: + if 'cstate' == primary_key.lower(): + return CStateController(), remainder + abort(404) + + +class RootController(object): + + @pecan.expose('json') + def index(self): + return dict() + + @expose('json') + def error(self, status): + try: + status = int(status) + except ValueError: + status = 500 + message = getattr(status_map.get(status), 'explanation', '') + return dict(status=status, message=message) + + @expose("json") + def _lookup(self, primary_key, *remainder): + if primary_key: + if 'v0.1' == primary_key.lower(): + return CStateControllerV1(), remainder + abort(404) diff --git a/cstate-management/docker/cstate-management/cstatemanagement/model/__init__.py b/cstate-management/docker/cstate-management/cstatemanagement/model/__init__.py new file mode 100644 index 0000000..b224c60 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/model/__init__.py @@ -0,0 +1,15 @@ +from pecan import conf # noqa + + +def init_model(): + """ + This is a stub method which is called at application startup time. + + If you need to bind to a parsed database configuration, set up tables or + ORM classes, or perform any database initialization, this is the + recommended place to do it. + + For more information working with databases, and some common recipes, + see https://pecan.readthedocs.io/en/latest/databases.html + """ + pass diff --git a/cstate-management/docker/cstate-management/cstatemanagement/tests/__init__.py b/cstate-management/docker/cstate-management/cstatemanagement/tests/__init__.py new file mode 100644 index 0000000..7587d34 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/tests/__init__.py @@ -0,0 +1,22 @@ +import os +from pecan import set_config +from pecan.testing import load_test_app +from unittest import TestCase + +__all__ = ['FunctionalTest'] + + +class FunctionalTest(TestCase): + """ + Used for functional tests where you need to test your + literal application and its integration with the framework. 
+ """ + + def setUp(self): + self.app = load_test_app(os.path.join( + os.path.dirname(__file__), + 'config.py' + )) + + def tearDown(self): + set_config({}, overwrite=True) diff --git a/cstate-management/docker/cstate-management/cstatemanagement/tests/config.py b/cstate-management/docker/cstate-management/cstatemanagement/tests/config.py new file mode 100644 index 0000000..9631c6e --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/tests/config.py @@ -0,0 +1,25 @@ +# Server Specific Configurations +server = { + 'port': '8080', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +app = { + 'root': 'cstatemanagement.controllers.root.RootController', + 'modules': ['cstatemanagement'], + 'static_root': '%(confdir)s/../../public', + 'template_path': '%(confdir)s/../templates', + 'debug': True, + 'errors': { + '404': '/error/404', + '__force_dict__': True + } +} + +# Custom Configurations must be in Python dictionary format:: +# +# foo = {'bar':'baz'} +# +# All configurations are accessible at:: +# pecan.conf diff --git a/cstate-management/docker/cstate-management/cstatemanagement/tests/test_functional.py b/cstate-management/docker/cstate-management/cstatemanagement/tests/test_functional.py new file mode 100644 index 0000000..e9356b7 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/tests/test_functional.py @@ -0,0 +1,12 @@ +from cstatemanagement.tests import FunctionalTest + + +class TestRootController(FunctionalTest): + + def test_get(self): + response = self.app.get('/') + assert response.status_int == 200 + + def test_get_not_found(self): + response = self.app.get('/a/bogus/url', expect_errors=True) + assert response.status_int == 404 diff --git a/cstate-management/docker/cstate-management/cstatemanagement/tests/test_units.py b/cstate-management/docker/cstate-management/cstatemanagement/tests/test_units.py new file mode 100644 index 0000000..573fb68 --- /dev/null +++ b/cstate-management/docker/cstate-management/cstatemanagement/tests/test_units.py @@ -0,0 +1,7 @@ +from unittest import TestCase + + +class TestUnits(TestCase): + + def test_units(self): + assert 5 * 5 == 25 diff --git a/cstate-management/docker/cstate-management/pylint.rc b/cstate-management/docker/cstate-management/pylint.rc new file mode 100644 index 0000000..141e10e --- /dev/null +++ b/cstate-management/docker/cstate-management/pylint.rc @@ -0,0 +1,255 @@ +[MASTER] +# Specify a configuration file. +rcfile=pylint.rc + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. Should be base names, not paths. +ignore= + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist=lxml.etree,greenlet + + + +[MESSAGES CONTROL] +# Disable the message, report, category or checker with the given id(s). 
You +# can either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). +# See "Messages Control" section of +# https://pylint.readthedocs.io/en/latest/user_guide +disable= + # C codes refer to Convention + C0103, # invalid-name + C0114, # missing-module-docstring + C0115, # missing-class-docstring + C0116, # missing-function-docstring + C0325, # superfluous-parens + C0411, # wrong-import-order + # R codes refer to refactoring + R0205, # useless-object-inheritance + R0901, # too-many-ancestors + R0903, # too-few-public-methods + R0912, # too-many-branches + R0913, # too-many-arguments + R0914, # too-many-locals + R0915, # too-many-statements + R1710, # inconsistent-return-statements + R1725, # super-with-arguments + R1721, # unnecessary-comprehension + R1732, # consider-using-with + R1735, # use-dict-literal + # W codes are warnings + W0107, # unnecessary-pass + W0102, # dangerous-default-value + W0212, # protected-access + W0246, # useless-parent-delegation + W0621, # redefined-outer-name + W0707, # raise-missing-from + W0718, # broad-exception-caught + W1201, # logging-not-lazy + W1202, # logging-format-interpolation + W1203, # logging-fstring-interpolation + W1514, # unspecified-encoding + +[REPORTS] +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + + +[SIMILARITIES] +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + + +[FORMAT] +# Maximum number of characters on a single line. +max-line-length=85 + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually 4 spaces or "\t" (1 tab). +indent-string=' ' + + +[TYPECHECK] +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis +ignored-modules=distutils,eventlet.green.subprocess,six,six.moves + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +# pylint is confused by sqlalchemy Table, as well as sqlalchemy Enum types +# ie: (unprovisioned, identity) +# LookupDict in requests library confuses pylint +ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local, + Table, unprovisioned, identity, LookupDict + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. 
+generated-members=REQUEST,acl_users,aq_parent + + +[BASIC] +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + + +[MISCELLANEOUS] +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[VARIABLES] +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the beginning of the name of dummy variables +# (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +[IMPORTS] +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[DESIGN] +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +[CLASSES] +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. 
+valid-classmethod-first-arg=cls + + +[EXCEPTIONS] +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + diff --git a/cstate-management/docker/cstate-management/requirements.txt b/cstate-management/docker/cstate-management/requirements.txt new file mode 100644 index 0000000..0fe062b --- /dev/null +++ b/cstate-management/docker/cstate-management/requirements.txt @@ -0,0 +1,6 @@ +pecan~=1.5.1 +wsme +mock +cherrypy +cheroot +oslo_utils \ No newline at end of file diff --git a/cstate-management/docker/cstate-management/run.py b/cstate-management/docker/cstate-management/run.py new file mode 100644 index 0000000..c7a504e --- /dev/null +++ b/cstate-management/docker/cstate-management/run.py @@ -0,0 +1,59 @@ +import os + +from cheroot import wsgi +from cheroot.wsgi import PathInfoDispatcher +import cherrypy +from pecan.deploy import deploy + +from oslo_utils import netutils + +wsgi_app = deploy('config.py') + +public_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'public')) + +# A dummy class for our Root object +# necessary for some CherryPy machinery + + +class Root(object): + pass + + +def get_wildcard_address(): + if netutils.is_ipv6_enabled(): + return "::" + return "0.0.0.0" + + +def make_static_config(static_dir_name): + """ + All custom static configurations are set here, since most are common, it + makes sense to generate them just once. + """ + static_path = os.path.join('/', static_dir_name) + path = os.path.join(public_path, static_dir_name) + configuration = { + static_path: { + 'tools.staticdir.on': True, + 'tools.staticdir.dir': path + } + } + return cherrypy.tree.mount(Root(), '/', config=configuration) + + +# Assuming your app has media on different paths, like 'css', and 'images' +application = PathInfoDispatcher({ + '/': wsgi_app +}) + +server = wsgi.Server( + (get_wildcard_address(), 32000), + application, + server_name='pecanapp' +) + +try: + server.start() +except KeyboardInterrupt: + print("Terminating server...") + server.stop() diff --git a/cstate-management/docker/cstate-management/setup.cfg b/cstate-management/docker/cstate-management/setup.cfg new file mode 100644 index 0000000..3b91193 --- /dev/null +++ b/cstate-management/docker/cstate-management/setup.cfg @@ -0,0 +1,6 @@ +[nosetests] +match=^test +where=cstatemanagement +nocapture=1 +cover-package=cstatemanagement +cover-erase=1 diff --git a/cstate-management/docker/cstate-management/setup.py b/cstate-management/docker/cstate-management/setup.py new file mode 100644 index 0000000..3a8ba8e --- /dev/null +++ b/cstate-management/docker/cstate-management/setup.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +try: + from setuptools import setup, find_packages +except ImportError: + from ez_setup import use_setuptools + use_setuptools() + from setuptools import setup, find_packages + +setup( + name='cstatemanagement', + version='0.1', + description='', + author='', + author_email='', + install_requires=[ + "pecan", + ], + test_suite='cstatemanagement', + zip_safe=False, + include_package_data=True, + packages=find_packages(exclude=['ez_setup']) +) diff --git a/cstate-management/docker/cstate-management/test-requirements.txt b/cstate-management/docker/cstate-management/test-requirements.txt new file mode 100644 index 0000000..d42863c --- /dev/null +++ b/cstate-management/docker/cstate-management/test-requirements.txt @@ -0,0 +1,22 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. 
Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +hacking>=1.1.0,<=2.0.0 # Apache-2.0 +astroid +bandit<1.7.2;python_version>="3.0" +coverage>=3.6 +fixtures>=3.0.0 # Apache-2.0/BSD +mock>=2.0.0 # BSD +python-subunit>=0.0.18 +requests-mock>=0.6.0 # Apache-2.0 +sphinx +oslosphinx +oslotest>=3.2.0 # Apache-2.0 +stestr>=1.0.0 # Apache-2.0 +testrepository>=0.0.18 +testtools!=1.2.0,>=0.9.36 +isort<5;python_version>="3.0" +pylint +pycryptodomex +flake8<3.8.0 + diff --git a/cstate-management/docker/cstate-management/tox.ini b/cstate-management/docker/cstate-management/tox.ini new file mode 100644 index 0000000..44ef678 --- /dev/null +++ b/cstate-management/docker/cstate-management/tox.ini @@ -0,0 +1,111 @@ +[tox] +envlist = flake8,py39,pylint,bandit +minversion = 2.9 +skipsdist = True + +# tox does not work if the path to the workdir is too long, so move it to /tmp +toxworkdir = /tmp/{env:USER}_cstate_management_tox +stxdir = {toxinidir}/../../../.. +distshare={toxworkdir}/.tox/distshare + +[testenv] +sitepackages = True +basepython = python3.9 + +allowlist_externals = bash + find + echo + +install_command = pip install -v -v -v \ + -c{toxinidir}/upper-constraints.txt \ + -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} \ + {opts} {packages} + +commands = + find . -type f -name "*.pyc" -delete + +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + PYTHONDONTWRITEBYTECODE=1 + OS_TEST_PATH=./cstatemanagement/tests + LANG=en_US.UTF-8 + LANGUAGE=en_US:en + LC_ALL=C + SYSINV_TEST_ENV=True + EVENTS_YAML=./cstatemanagement/tests/events_for_testing.yaml + TOX_WORK_DIR={toxworkdir} + PYLINTHOME={toxworkdir} + +deps = -r{toxinidir}/test-requirements.txt + -e{[tox]stxdir}/config/sysinv/sysinv/sysinv + -e{[tox]stxdir}/config/tsconfig/tsconfig + -e{[tox]stxdir}/fault/fm-api/source + -e{[tox]stxdir}/fault/python-fmclient/fmclient + -e{[tox]stxdir}/utilities/ceph/python-cephclient/python-cephclient + -e{[tox]stxdir}/update/sw-patch/cgcs-patch + +[flake8] +# H series are hacking +# H301 one import per line +# H403 multi line docstrings should end on a new line +# H404 multi line docstring should start without a leading new line +# H405 multi line docstring summary not separated with an empty line +# E series +# E122 continuation line missing indentation or outdented +ignore = H301,H403,H404,H405,W503,W504,E122 +exclude = build,dist,tools,.eggs +max-line-length=80 + +[testenv:flake8] +deps = -r{toxinidir}/test-requirements.txt +commands = + flake8 {posargs} . + +[testenv:py39] +commands = + {[testenv]commands} + stestr run {posargs} + stestr slowest + +[testenv:venv] +commands = {posargs} + +[bandit] +# The following bandit tests are being skipped: +# B108: Test for insecure usage of tmp file/directory +# B404: security implications associated with the subprocess module. +# B603: subprocess call - check for execution of untrusted input. 
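+# B404 and B603 are skipped because cstatemanagement/common/cpusets.py
+# invokes crictl via the subprocess module.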
+# Note: 'skips' entry cannot be split across multiple lines +# +skips = B108,B404,B603 +exclude = tests + +[testenv:bandit] +deps = -r{toxinidir}/test-requirements.txt + bandit +commands = bandit --ini tox.ini -n 5 -r cstatemanagement + +[testenv:pylint] +commands = + pylint {posargs} cstatemanagement --rcfile=./pylint.rc + +[testenv:cover] +setenv = {[testenv]setenv} + PYTHON=coverage run --parallel-mode +commands = + {[testenv]commands} + coverage erase + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[testenv:pip-missing-reqs] +# do not install test-requirements as that will pollute the virtualenv for +# determining missing packages +# this also means that pip-missing-reqs must be installed separately, outside +# of the requirements.txt files +deps = pip_missing_reqs + -rrequirements.txt +commands=pip-missing-reqs -d --ignore-file=/cstatemanagement/tests cstatemanagement diff --git a/cstate-management/docker/cstate-management/upper-constraints.txt b/cstate-management/docker/cstate-management/upper-constraints.txt new file mode 100644 index 0000000..beb5a6f --- /dev/null +++ b/cstate-management/docker/cstate-management/upper-constraints.txt @@ -0,0 +1,2 @@ + +# Override upstream constraints based on StarlingX load diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..c01ade2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +# Nothing diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..d423810 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,4 @@ +# hacking pulls in flake8 +hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 +bashate >= 0.2 + diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..bb6c08a --- /dev/null +++ b/tox.ini @@ -0,0 +1,64 @@ +[tox] +envlist = linters +minversion = 2.9 +skipsdist = True +sitepackages = False + +[testenv] +install_command = pip install -U \ + {opts} {packages} \ + -c{env:TOX_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt} +setenv = + VIRTUAL_ENV={envdir} + OS_STDOUT_CAPTURE=1 + OS_STDERR_CAPTURE=1 + OS_DEBUG=1 + OS_LOG_CAPTURE=1 +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +allowlist_externals = + bash +passenv = + XDG_CACHE_HOME + + +[testenv:bashate] +# Treat all E* codes as Errors rather than warnings using: -e 'E*' +commands = + bash -c "find {toxinidir} \ + -not \( -type d -name .?\* -prune \) \ + -type f \ + -not -name \*~ \ + -not -name \*.md \ + -name \*.sh \ + -print0 | xargs -r -n 1 -0 bashate -v \ + -e 'E*'" + + +[testenv:linters] +commands = + {[testenv:bashate]commands} + + +[testenv:flake8] +basepython = python3 +description = Dummy environment to allow flake8 to be run in subdir tox + + +[testenv:pylint] +basepython = python3 +description = Dummy environment to allow pylint to be run in subdir tox + + +[testenv:metadata] +basepython = python3 +description = Dummy environment to allow sysinv-app to be run in subdir tox + +[testenv:py39] +basepython = python3 +description = Dummy environment to allow sysinv-app to be run in subdir tox + +[testenv:bandit] +basepython = python3 +description = Dummy environment to allow bandit to be run in subdir tox
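
A minimal usage sketch of the v0.1 REST API defined in root.py, assuming the
service is reachable on localhost:32000 as configured in config.py and run.py;
the namespace, pod name and core range are illustrative placeholders only:

    import json
    import urllib.request

    BASE = "http://localhost:32000/v0.1/cstate"

    # "kube-system", "example-pod" and "10-13" are placeholder values;
    # substitute a real namespace, pod and core range.

    # GET /v0.1/cstate/current -> ConfiguredController.get()
    url = BASE + "/current?namespace=kube-system&pod_name=example-pod"
    with urllib.request.urlopen(url) as resp:
        print(json.load(resp))

    # POST /v0.1/cstate/set -> SetController.post(): limit cores 10-13 to C1
    payload = json.dumps({
        "namespace": "kube-system",
        "pod_name": "example-pod",
        "cores": "10-13",
        "max_cstate": "C1",
    }).encode()
    req = urllib.request.Request(
        BASE + "/set", data=payload,
        headers={"Content-Type": "application/json"}, method="POST")
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))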