fix(kubernetes): temporary solution for updated k8s python client

parent 07d6fe7442
commit 9129813244

1478 changed files with 422354 additions and 2 deletions

Dockerfile (11 changed lines)
@@ -6,6 +6,13 @@ ENV PYTHONUNBUFFERED=1 \
 CERT_CLEANUP=false \
 PATCH_SECRETNAME=true

-RUN pip install kubernetes
+# As the k8s Python client does not yet support k8s 1.32, the updated client has been temporarily moved to the repo.
+# So the installation is currently not needed.
+# RUN pip install kubernetes
+
 COPY main.py /
-CMD python /main.py
+COPY kubernetes /kubernetes
+
+RUN pip install -r /kubernetes/requirements.txt
+
+CMD python main.py
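
For orientation only: a minimal sketch of how a script such as main.py could use the vendored client once /kubernetes/requirements.txt is installed. The real main.py is not shown in this commit, so the resource listed below is an illustrative assumption.

```python
# Illustrative sketch, not part of this commit; the actual main.py is not shown here.
from kubernetes import client, config

# Inside the cluster, credentials come from the pod's mounted service account.
config.load_incluster_config()

v1 = client.CoreV1Api()
# Listing secrets is an assumed workload, hinted at by PATCH_SECRETNAME above.
for secret in v1.list_namespaced_secret(namespace="default").items:
    print(secret.metadata.name)
```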

kubernetes/.gitlab-ci.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
# ref: https://docs.gitlab.com/ee/ci/README.html

stages:
  - test

.nosetest:
  stage: test
  script:
    - pip install -r requirements.txt
    - pip install -r test-requirements.txt
    - pytest --cov=client

nosetest-2.7:
  extends: .nosetest
  image: python:2.7-alpine
nosetest-3.3:
  extends: .nosetest
  image: python:3.3-alpine
nosetest-3.4:
  extends: .nosetest
  image: python:3.4-alpine
nosetest-3.5:
  extends: .nosetest
  image: python:3.5-alpine
nosetest-3.6:
  extends: .nosetest
  image: python:3.6-alpine
nosetest-3.7:
  extends: .nosetest
  image: python:3.7-alpine
nosetest-3.8:
  extends: .nosetest
  image: python:3.8-alpine

kubernetes/.openapi-generator-ignore (new file, 7 lines)
@@ -0,0 +1,7 @@
.gitignore
git_push.sh
requirements.txt
test-requirements.txt
setup.py
.travis.yml
tox.ini

kubernetes/.openapi-generator/COMMIT (new file, 2 lines)
@@ -0,0 +1,2 @@
Requested Commit/Tag : v4.3.0
Actual Commit : c224cf484b020a7f5997d883cf331715df3fb52a

kubernetes/.openapi-generator/VERSION (new file, 1 line)
@@ -0,0 +1 @@
4.3.0

kubernetes/.openapi-generator/swagger.json.sha256 (new file, 1 line)
@@ -0,0 +1 @@
5f773c685cb5e7b97c1b3be4a7cff387a8077a4789c738dac715ba91b1c50eda

kubernetes/README.md (new file, 1527 lines)
File diff suppressed because it is too large.

kubernetes/__init__.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__project__ = 'kubernetes'
# The version is auto-updated. Please do not edit.
__version__ = "32.0.0+snapshot"

from . import client
from . import config
from . import dynamic
from . import watch
from . import stream
from . import utils
from . import leaderelection
72
kubernetes/base/.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
72
kubernetes/base/.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
<!-- Thanks for sending a pull request! Here are some tips for you:
|
||||
|
||||
1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide/first-contribution.md#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
|
||||
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
|
||||
https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
|
||||
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/sig-testing/testing.md
|
||||
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
|
||||
5. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
|
||||
-->
|
||||
|
||||
#### What type of PR is this?
|
||||
|
||||
<!--
|
||||
Add one of the following kinds:
|
||||
/kind bug
|
||||
/kind cleanup
|
||||
/kind documentation
|
||||
/kind feature
|
||||
/kind design
|
||||
|
||||
Optionally add one or more of the following kinds if applicable:
|
||||
/kind api-change
|
||||
/kind deprecation
|
||||
/kind failing-test
|
||||
/kind flake
|
||||
/kind regression
|
||||
-->
|
||||
|
||||
#### What this PR does / why we need it:
|
||||
|
||||
#### Which issue(s) this PR fixes:
|
||||
<!--
|
||||
*Automatically closes linked issue when PR is merged.
|
||||
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
|
||||
_If PR is about `failing-tests or flakes`, please post the related issues/tests in a comment and do not use `Fixes`_*
|
||||
-->
|
||||
Fixes #
|
||||
|
||||
#### Special notes for your reviewer:
|
||||
|
||||
#### Does this PR introduce a user-facing change?
|
||||
<!--
|
||||
If no, just write "NONE" in the release-note block below.
|
||||
If yes, a release note is required:
|
||||
Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
|
||||
|
||||
For more information on release notes see: https://git.k8s.io/community/contributors/guide/release-notes.md
|
||||
-->
|
||||
```release-note
|
||||
|
||||
```
|
||||
|
||||
#### Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.:
|
||||
|
||||
<!--
|
||||
This section can be blank if this pull request does not require a release note.
|
||||
|
||||
When adding links which point to resources within git repositories, like
|
||||
KEPs or supporting documentation, please reference a specific commit and avoid
|
||||
linking directly to the master branch. This ensures that links reference a
|
||||
specific point in time, rather than a document that may change over time.
|
||||
|
||||
See here for guidance on getting permanent links to files: https://help.github.com/en/articles/getting-permanent-links-to-files
|
||||
|
||||
Please use the following format for linking documentation:
|
||||
- [KEP]: <link>
|
||||
- [Usage]: <link>
|
||||
- [Other doc]: <link>
|
||||
-->
|
||||
```docs
|
||||
|
||||
```
|
95
kubernetes/base/.gitignore
vendored
Normal file
95
kubernetes/base/.gitignore
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
env/
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*,cover
|
||||
.hypothesis/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# IPython Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# celery beat schedule file
|
||||
celerybeat-schedule
|
||||
|
||||
# dotenv
|
||||
.env
|
||||
|
||||
# virtualenv
|
||||
venv/
|
||||
ENV/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# Intellij IDEA files
|
||||
.idea/*
|
||||
*.iml
|
||||
.vscode
|
||||
|

kubernetes/base/.travis.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
# ref: https://docs.travis-ci.com/user/languages/python
language: python
dist: xenial

stages:
  - verify boilerplate
  - test

install:
  - pip install tox

script:
  - ./run_tox.sh tox

jobs:
  include:
    - stage: verify boilerplate
      script: ./hack/verify-boilerplate.sh
      python: 3.7
    - stage: test
      python: 3.9
      env: TOXENV=update-pycodestyle
    - python: 3.9
      env: TOXENV=coverage,codecov
    - python: 3.7
      env: TOXENV=docs
    - python: 3.5
      env: TOXENV=py35
    - python: 3.5
      env: TOXENV=py35-functional
    - python: 3.6
      env: TOXENV=py36
    - python: 3.6
      env: TOXENV=py36-functional
    - python: 3.7
      env: TOXENV=py37
    - python: 3.7
      env: TOXENV=py37-functional
    - python: 3.8
      env: TOXENV=py38
    - python: 3.8
      env: TOXENV=py38-functional
    - python: 3.9
      env: TOXENV=py39
    - python: 3.9
      env: TOXENV=py39-functional

kubernetes/base/CONTRIBUTING.md (new file, 29 lines)
@@ -0,0 +1,29 @@
# Contributing

Thanks for taking the time to join our community and start contributing!

Any changes to utilities in this repo should be send as a PR to this repo.
After the PR is merged, developers should create another PR in the main repo to update the submodule.
See [this document](https://github.com/kubernetes-client/python/blob/master/devel/submodules.md) for more guidelines.

The [Contributor Guide](https://github.com/kubernetes/community/blob/master/contributors/guide/README.md)
provides detailed instructions on how to get your ideas and bug fixes seen and accepted.

Please remember to sign the [CNCF CLA](https://github.com/kubernetes/community/blob/master/CLA.md) and
read and observe the [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

## Adding new Python modules or Python scripts
If you add a new Python module please make sure it includes the correct header
as found in:
```
hack/boilerplate/boilerplate.py.txt
```

This module should not include a shebang line.

If you add a new Python helper script intended for developers usage, it should
go into the directory `hack` and include a shebang line `#!/usr/bin/env python`
at the top in addition to rest of the boilerplate text as in all other modules.

In addition this script's name should be added to the list
`SKIP_FILES` at the top of hack/boilerplate/boilerplate.py.
201
kubernetes/base/LICENSE
Normal file
201
kubernetes/base/LICENSE
Normal file
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|

kubernetes/base/OWNERS (new file, 9 lines)
@@ -0,0 +1,9 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - yliaog
  - roycaihw
emeritus_approvers:
  - mbohlool
reviewers:
  - fabianvf

kubernetes/base/README.md (new file, 13 lines)
@@ -0,0 +1,13 @@
# python-base

[![Build Status](https://travis-ci.org/kubernetes-client/python-base.svg?branch=master)](https://travis-ci.org/kubernetes-client/python-base)

This is the utility part of the [python client](https://github.com/kubernetes-client/python). It has been added to the main
repo using git submodules. This structure allow other developers to create
their own kubernetes client and still use standard kubernetes python utilities.
For more information refer to [clients-library-structure](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/csi-client-structure-proposal.md).

## Contributing

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to contribute.


kubernetes/base/SECURITY_CONTACTS (new file, 15 lines)
@@ -0,0 +1,15 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/

mbohlool
roycaihw
yliaog

kubernetes/base/code-of-conduct.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Kubernetes Community Code of Conduct

Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

kubernetes/base/config/__init__.py (new file, 49 lines)
@@ -0,0 +1,49 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os.path import exists, expanduser

from .config_exception import ConfigException
from .incluster_config import load_incluster_config
from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION,
                          list_kube_config_contexts, load_kube_config,
                          load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict)


def load_config(**kwargs):
    """
    Wrapper function to load the kube_config.
    It will initially try to load_kube_config from provided path,
    then check if the KUBE_CONFIG_DEFAULT_LOCATION exists
    If neither exists, it will fall back to load_incluster_config
    and inform the user accordingly.

    :param kwargs: A combination of all possible kwargs that
    can be passed to either load_kube_config or
    load_incluster_config functions.
    """
    if "config_file" in kwargs.keys():
        load_kube_config(**kwargs)
    elif "kube_config_path" in kwargs.keys():
        kwargs["config_file"] = kwargs.pop("kube_config_path", None)
        load_kube_config(**kwargs)
    elif exists(expanduser(KUBE_CONFIG_DEFAULT_LOCATION)):
        load_kube_config(**kwargs)
    else:
        print(
            "kube_config_path not provided and "
            "default location ({0}) does not exist. "
            "Using inCluster Config. "
            "This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION))
        load_incluster_config(**kwargs)
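
The load_config wrapper above tries an explicitly provided kubeconfig first, then the default ~/.kube/config location, and finally falls back to in-cluster configuration. A minimal usage sketch (not part of this diff):

```python
from kubernetes import client, config

# Picks load_kube_config() when a kubeconfig is available,
# otherwise falls back to load_incluster_config().
config.load_config()

for ns in client.CoreV1Api().list_namespace().items:
    print(ns.metadata.name)
```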

kubernetes/base/config/config_exception.py (new file, 17 lines)
@@ -0,0 +1,17 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class ConfigException(Exception):
    pass

kubernetes/base/config/dateutil.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import math
import re


class TimezoneInfo(datetime.tzinfo):
    def __init__(self, h, m):
        self._name = "UTC"
        if h != 0 and m != 0:
            self._name += "%+03d:%2d" % (h, m)
        self._delta = datetime.timedelta(hours=h, minutes=math.copysign(m, h))

    def utcoffset(self, dt):
        return self._delta

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        return datetime.timedelta(0)


UTC = TimezoneInfo(0, 0)

# ref https://www.ietf.org/rfc/rfc3339.txt
_re_rfc3339 = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)"        # full-date
                         r"[ Tt]"                           # Separator
                         r"(\d\d):(\d\d):(\d\d)([.,]\d+)?"  # partial-time
                         r"([zZ ]|[-+]\d\d?:\d\d)?",        # time-offset
                         re.VERBOSE + re.IGNORECASE)
_re_timezone = re.compile(r"([-+])(\d\d?):?(\d\d)?")

MICROSEC_PER_SEC = 1000000


def parse_rfc3339(s):
    if isinstance(s, datetime.datetime):
        # no need to parse it, just make sure it has a timezone.
        if not s.tzinfo:
            return s.replace(tzinfo=UTC)
        return s
    groups = _re_rfc3339.search(s).groups()
    dt = [0] * 7
    for x in range(6):
        dt[x] = int(groups[x])
    us = 0
    if groups[6] is not None:
        partial_sec = float(groups[6].replace(",", "."))
        us = int(MICROSEC_PER_SEC * partial_sec)
    tz = UTC
    if groups[7] is not None and groups[7] != 'Z' and groups[7] != 'z':
        tz_groups = _re_timezone.search(groups[7]).groups()
        hour = int(tz_groups[1])
        minute = 0
        if tz_groups[0] == "-":
            hour *= -1
        if tz_groups[2]:
            minute = int(tz_groups[2])
        tz = TimezoneInfo(hour, minute)
    return datetime.datetime(
        year=dt[0], month=dt[1], day=dt[2],
        hour=dt[3], minute=dt[4], second=dt[5],
        microsecond=us, tzinfo=tz)


def format_rfc3339(date_time):
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=UTC)
    date_time = date_time.astimezone(UTC)
    return date_time.strftime('%Y-%m-%dT%H:%M:%SZ')
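
For reference, the round trip these helpers implement, assuming the vendored config package is importable as kubernetes.config.dateutil (a sketch, not part of the diff):

```python
from kubernetes.config.dateutil import format_rfc3339, parse_rfc3339

# The offset is preserved on parsing; formatting normalizes to UTC.
dt = parse_rfc3339("2017-07-25T04:44:21.005+03:00")
print(dt.isoformat())        # 2017-07-25T04:44:21.005000+03:00
print(format_rfc3339(dt))    # 2017-07-25T01:44:21Z
```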
68
kubernetes/base/config/dateutil_test.py
Normal file
68
kubernetes/base/config/dateutil_test.py
Normal file
|
@ -0,0 +1,68 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
|
||||
from .dateutil import UTC, TimezoneInfo, format_rfc3339, parse_rfc3339
|
||||
|
||||
|
||||
class DateUtilTest(unittest.TestCase):
|
||||
|
||||
def _parse_rfc3339_test(self, st, y, m, d, h, mn, s, us):
|
||||
actual = parse_rfc3339(st)
|
||||
expected = datetime(y, m, d, h, mn, s, us, UTC)
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_parse_rfc3339(self):
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21Z",
|
||||
2017, 7, 25, 4, 44, 21, 0)
|
||||
self._parse_rfc3339_test("2017-07-25 04:44:21Z",
|
||||
2017, 7, 25, 4, 44, 21, 0)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21",
|
||||
2017, 7, 25, 4, 44, 21, 0)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21z",
|
||||
2017, 7, 25, 4, 44, 21, 0)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21+03:00",
|
||||
2017, 7, 25, 1, 44, 21, 0)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21-03:00",
|
||||
2017, 7, 25, 7, 44, 21, 0)
|
||||
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21,005Z",
|
||||
2017, 7, 25, 4, 44, 21, 5000)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21.005Z",
|
||||
2017, 7, 25, 4, 44, 21, 5000)
|
||||
self._parse_rfc3339_test("2017-07-25 04:44:21.0050Z",
|
||||
2017, 7, 25, 4, 44, 21, 5000)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21.5",
|
||||
2017, 7, 25, 4, 44, 21, 500000)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21.005z",
|
||||
2017, 7, 25, 4, 44, 21, 5000)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21.005+03:00",
|
||||
2017, 7, 25, 1, 44, 21, 5000)
|
||||
self._parse_rfc3339_test("2017-07-25T04:44:21.005-03:00",
|
||||
2017, 7, 25, 7, 44, 21, 5000)
|
||||
|
||||
def test_format_rfc3339(self):
|
||||
self.assertEqual(
|
||||
format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0, UTC)),
|
||||
"2017-07-25T04:44:21Z")
|
||||
self.assertEqual(
|
||||
format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0,
|
||||
TimezoneInfo(2, 0))),
|
||||
"2017-07-25T02:44:21Z")
|
||||
self.assertEqual(
|
||||
format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0,
|
||||
TimezoneInfo(-2, 30))),
|
||||
"2017-07-25T07:14:21Z")
|
107
kubernetes/base/config/exec_provider.py
Normal file
107
kubernetes/base/config/exec_provider.py
Normal file
|
@ -0,0 +1,107 @@
|
|||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from .config_exception import ConfigException
|
||||
|
||||
|
||||
class ExecProvider(object):
|
||||
"""
|
||||
Implementation of the proposal for out-of-tree client
|
||||
authentication providers as described here --
|
||||
https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md
|
||||
|
||||
Missing from implementation:
|
||||
|
||||
* TLS cert support
|
||||
* caching
|
||||
"""
|
||||
|
||||
def __init__(self, exec_config, cwd, cluster=None):
|
||||
"""
|
||||
exec_config must be of type ConfigNode because we depend on
|
||||
safe_get(self, key) to correctly handle optional exec provider
|
||||
config parameters.
|
||||
"""
|
||||
for key in ['command', 'apiVersion']:
|
||||
if key not in exec_config:
|
||||
raise ConfigException(
|
||||
'exec: malformed request. missing key \'%s\'' % key)
|
||||
self.api_version = exec_config['apiVersion']
|
||||
self.args = [exec_config['command']]
|
||||
if exec_config.safe_get('args'):
|
||||
self.args.extend(exec_config['args'])
|
||||
self.env = os.environ.copy()
|
||||
if exec_config.safe_get('env'):
|
||||
additional_vars = {}
|
||||
for item in exec_config['env']:
|
||||
name = item['name']
|
||||
value = item['value']
|
||||
additional_vars[name] = value
|
||||
self.env.update(additional_vars)
|
||||
if exec_config.safe_get('provideClusterInfo'):
|
||||
self.cluster = cluster
|
||||
else:
|
||||
self.cluster = None
|
||||
self.cwd = cwd or None
|
||||
|
||||
def run(self, previous_response=None):
|
||||
is_interactive = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
|
||||
kubernetes_exec_info = {
|
||||
'apiVersion': self.api_version,
|
||||
'kind': 'ExecCredential',
|
||||
'spec': {
|
||||
'interactive': is_interactive
|
||||
}
|
||||
}
|
||||
if previous_response:
|
||||
kubernetes_exec_info['spec']['response'] = previous_response
|
||||
if self.cluster:
|
||||
kubernetes_exec_info['spec']['cluster'] = self.cluster
|
||||
|
||||
self.env['KUBERNETES_EXEC_INFO'] = json.dumps(kubernetes_exec_info)
|
||||
process = subprocess.Popen(
|
||||
self.args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=sys.stderr if is_interactive else subprocess.PIPE,
|
||||
stdin=sys.stdin if is_interactive else None,
|
||||
cwd=self.cwd,
|
||||
env=self.env,
|
||||
universal_newlines=True,
|
||||
shell=True)
|
||||
(stdout, stderr) = process.communicate()
|
||||
exit_code = process.wait()
|
||||
if exit_code != 0:
|
||||
msg = 'exec: process returned %d' % exit_code
|
||||
stderr = stderr.strip()
|
||||
if stderr:
|
||||
msg += '. %s' % stderr
|
||||
raise ConfigException(msg)
|
||||
try:
|
||||
data = json.loads(stdout)
|
||||
except ValueError as de:
|
||||
raise ConfigException(
|
||||
'exec: failed to decode process output: %s' % de)
|
||||
for key in ('apiVersion', 'kind', 'status'):
|
||||
if key not in data:
|
||||
raise ConfigException(
|
||||
'exec: malformed response. missing key \'%s\'' % key)
|
||||
if data['apiVersion'] != self.api_version:
|
||||
raise ConfigException(
|
||||
'exec: plugin api version %s does not match %s' %
|
||||
(data['apiVersion'], self.api_version))
|
||||
return data['status']
|
188
kubernetes/base/config/exec_provider_test.py
Normal file
188
kubernetes/base/config/exec_provider_test.py
Normal file
|
@ -0,0 +1,188 @@
|
|||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .exec_provider import ExecProvider
|
||||
from .kube_config import ConfigNode
|
||||
|
||||
|
||||
class ExecProviderTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.input_ok = ConfigNode('test', {
|
||||
'command': 'aws-iam-authenticator',
|
||||
'args': ['token', '-i', 'dummy'],
|
||||
'apiVersion': 'client.authentication.k8s.io/v1beta1',
|
||||
'env': None
|
||||
})
|
||||
self.input_with_cluster = ConfigNode('test', {
|
||||
'command': 'aws-iam-authenticator',
|
||||
'args': ['token', '-i', 'dummy'],
|
||||
'apiVersion': 'client.authentication.k8s.io/v1beta1',
|
||||
'provideClusterInfo': True,
|
||||
'env': None
|
||||
})
|
||||
self.output_ok = """
|
||||
{
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"kind": "ExecCredential",
|
||||
"status": {
|
||||
"token": "dummy"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
def test_missing_input_keys(self):
|
||||
exec_configs = [ConfigNode('test1', {}),
|
||||
ConfigNode('test2', {'command': ''}),
|
||||
ConfigNode('test3', {'apiVersion': ''})]
|
||||
for exec_config in exec_configs:
|
||||
with self.assertRaises(ConfigException) as context:
|
||||
ExecProvider(exec_config, None)
|
||||
self.assertIn('exec: malformed request. missing key',
|
||||
context.exception.args[0])
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_error_code_returned(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 1
|
||||
instance.communicate.return_value = ('', '')
|
||||
with self.assertRaises(ConfigException) as context:
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
ep.run()
|
||||
self.assertIn('exec: process returned %d' %
|
||||
instance.wait.return_value, context.exception.args[0])
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_nonjson_output_returned(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
instance.communicate.return_value = ('', '')
|
||||
with self.assertRaises(ConfigException) as context:
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
ep.run()
|
||||
self.assertIn('exec: failed to decode process output',
|
||||
context.exception.args[0])
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_missing_output_keys(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
outputs = [
|
||||
"""
|
||||
{
|
||||
"kind": "ExecCredential",
|
||||
"status": {
|
||||
"token": "dummy"
|
||||
}
|
||||
}
|
||||
""", """
|
||||
{
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"status": {
|
||||
"token": "dummy"
|
||||
}
|
||||
}
|
||||
""", """
|
||||
{
|
||||
"apiVersion": "client.authentication.k8s.io/v1beta1",
|
||||
"kind": "ExecCredential"
|
||||
}
|
||||
"""
|
||||
]
|
||||
for output in outputs:
|
||||
instance.communicate.return_value = (output, '')
|
||||
with self.assertRaises(ConfigException) as context:
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
ep.run()
|
||||
self.assertIn('exec: malformed response. missing key',
|
||||
context.exception.args[0])
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_mismatched_api_version(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
wrong_api_version = 'client.authentication.k8s.io/v1'
|
||||
output = """
|
||||
{
|
||||
"apiVersion": "%s",
|
||||
"kind": "ExecCredential",
|
||||
"status": {
|
||||
"token": "dummy"
|
||||
}
|
||||
}
|
||||
""" % wrong_api_version
|
||||
instance.communicate.return_value = (output, '')
|
||||
with self.assertRaises(ConfigException) as context:
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
ep.run()
|
||||
self.assertIn(
|
||||
'exec: plugin api version %s does not match' %
|
||||
wrong_api_version,
|
||||
context.exception.args[0])
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_ok_01(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
instance.communicate.return_value = (self.output_ok, '')
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
result = ep.run()
|
||||
self.assertTrue(isinstance(result, dict))
|
||||
self.assertTrue('token' in result)
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_run_in_dir(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
instance.communicate.return_value = (self.output_ok, '')
|
||||
ep = ExecProvider(self.input_ok, '/some/directory')
|
||||
ep.run()
|
||||
self.assertEqual(mock.call_args[1]['cwd'], '/some/directory')
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_ok_no_console_attached(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
instance.communicate.return_value = (self.output_ok, '')
|
||||
mock_stdout = unittest.mock.patch(
|
||||
'sys.stdout', new=None) # Simulate detached console
|
||||
with mock_stdout:
|
||||
ep = ExecProvider(self.input_ok, None)
|
||||
result = ep.run()
|
||||
self.assertTrue(isinstance(result, dict))
|
||||
self.assertTrue('token' in result)
|
||||
|
||||
@mock.patch('subprocess.Popen')
|
||||
def test_with_cluster_info(self, mock):
|
||||
instance = mock.return_value
|
||||
instance.wait.return_value = 0
|
||||
instance.communicate.return_value = (self.output_ok, '')
|
||||
ep = ExecProvider(self.input_with_cluster, None, {'server': 'name.company.com'})
|
||||
result = ep.run()
|
||||
self.assertTrue(isinstance(result, dict))
|
||||
self.assertTrue('token' in result)
|
||||
|
||||
obj = json.loads(mock.call_args.kwargs['env']['KUBERNETES_EXEC_INFO'])
|
||||
self.assertEqual(obj['spec']['cluster']['server'], 'name.company.com')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
121
kubernetes/base/config/incluster_config.py
Normal file
121
kubernetes/base/config/incluster_config.py
Normal file
|
@ -0,0 +1,121 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
|
||||
from kubernetes.client import Configuration
|
||||
|
||||
from .config_exception import ConfigException
|
||||
|
||||
SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST"
|
||||
SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT"
|
||||
SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token"
|
||||
SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
|
||||
|
||||
def _join_host_port(host, port):
|
||||
"""Adapted golang's net.JoinHostPort"""
|
||||
template = "%s:%s"
|
||||
host_requires_bracketing = ':' in host or '%' in host
|
||||
if host_requires_bracketing:
|
||||
template = "[%s]:%s"
|
||||
return template % (host, port)
|
||||
|
||||
|
||||
class InClusterConfigLoader(object):
|
||||
def __init__(self,
|
||||
token_filename,
|
||||
cert_filename,
|
||||
try_refresh_token=True,
|
||||
environ=os.environ):
|
||||
self._token_filename = token_filename
|
||||
self._cert_filename = cert_filename
|
||||
self._environ = environ
|
||||
self._try_refresh_token = try_refresh_token
|
||||
self._token_refresh_period = datetime.timedelta(minutes=1)
|
||||
|
||||
def load_and_set(self, client_configuration=None):
|
||||
try_set_default = False
|
||||
if client_configuration is None:
|
||||
client_configuration = type.__call__(Configuration)
|
||||
try_set_default = True
|
||||
self._load_config()
|
||||
self._set_config(client_configuration)
|
||||
if try_set_default:
|
||||
Configuration.set_default(client_configuration)
|
||||
|
||||
def _load_config(self):
|
||||
if (SERVICE_HOST_ENV_NAME not in self._environ
|
||||
or SERVICE_PORT_ENV_NAME not in self._environ):
|
||||
raise ConfigException("Service host/port is not set.")
|
||||
|
||||
if (not self._environ[SERVICE_HOST_ENV_NAME]
|
||||
or not self._environ[SERVICE_PORT_ENV_NAME]):
|
||||
raise ConfigException("Service host/port is set but empty.")
|
||||
|
||||
self.host = ("https://" +
|
||||
_join_host_port(self._environ[SERVICE_HOST_ENV_NAME],
|
||||
self._environ[SERVICE_PORT_ENV_NAME]))
|
||||
|
||||
if not os.path.isfile(self._token_filename):
|
||||
raise ConfigException("Service token file does not exist.")
|
||||
|
||||
self._read_token_file()
|
||||
|
||||
if not os.path.isfile(self._cert_filename):
|
||||
raise ConfigException(
|
||||
"Service certification file does not exist.")
|
||||
|
||||
with open(self._cert_filename) as f:
|
||||
if not f.read():
|
||||
raise ConfigException("Cert file exists but empty.")
|
||||
|
||||
self.ssl_ca_cert = self._cert_filename
|
||||
|
||||
def _set_config(self, client_configuration):
|
||||
client_configuration.host = self.host
|
||||
client_configuration.ssl_ca_cert = self.ssl_ca_cert
|
||||
if self.token is not None:
|
||||
client_configuration.api_key['authorization'] = self.token
|
||||
if not self._try_refresh_token:
|
||||
return
|
||||
|
||||
def _refresh_api_key(client_configuration):
|
||||
if self.token_expires_at <= datetime.datetime.now():
|
||||
self._read_token_file()
|
||||
self._set_config(client_configuration)
|
||||
|
||||
client_configuration.refresh_api_key_hook = _refresh_api_key
|
||||
|
||||
def _read_token_file(self):
|
||||
with open(self._token_filename) as f:
|
||||
content = f.read()
|
||||
if not content:
|
||||
raise ConfigException("Token file exists but empty.")
|
||||
self.token = "bearer " + content
|
||||
self.token_expires_at = datetime.datetime.now(
|
||||
) + self._token_refresh_period
|
||||
|
||||
|
||||
def load_incluster_config(client_configuration=None, try_refresh_token=True):
|
||||
"""
|
||||
Use the service account kubernetes gives to pods to connect to kubernetes
|
||||
cluster. It's intended for clients that expect to be running inside a pod
|
||||
running on kubernetes. It will raise an exception if called from a process
|
||||
not running in a kubernetes environment."""
|
||||
InClusterConfigLoader(
|
||||
token_filename=SERVICE_TOKEN_FILENAME,
|
||||
cert_filename=SERVICE_CERT_FILENAME,
|
||||
try_refresh_token=try_refresh_token).load_and_set(client_configuration)
|
163
kubernetes/base/config/incluster_config_test.py
Normal file
163
kubernetes/base/config/incluster_config_test.py
Normal file
|
@ -0,0 +1,163 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import tempfile
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from kubernetes.client import Configuration
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .incluster_config import (SERVICE_HOST_ENV_NAME, SERVICE_PORT_ENV_NAME,
|
||||
InClusterConfigLoader, _join_host_port)
|
||||
|
||||
_TEST_TOKEN = "temp_token"
|
||||
_TEST_NEW_TOKEN = "temp_new_token"
|
||||
_TEST_CERT = "temp_cert"
|
||||
_TEST_HOST = "127.0.0.1"
|
||||
_TEST_PORT = "80"
|
||||
_TEST_HOST_PORT = "127.0.0.1:80"
|
||||
_TEST_IPV6_HOST = "::1"
|
||||
_TEST_IPV6_HOST_PORT = "[::1]:80"
|
||||
|
||||
_TEST_ENVIRON = {
|
||||
SERVICE_HOST_ENV_NAME: _TEST_HOST,
|
||||
SERVICE_PORT_ENV_NAME: _TEST_PORT
|
||||
}
|
||||
_TEST_IPV6_ENVIRON = {
|
||||
SERVICE_HOST_ENV_NAME: _TEST_IPV6_HOST,
|
||||
SERVICE_PORT_ENV_NAME: _TEST_PORT
|
||||
}
|
||||
|
||||
|
||||
class InClusterConfigTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self._temp_files = []
|
||||
|
||||
def tearDown(self):
|
||||
for f in self._temp_files:
|
||||
os.remove(f)
|
||||
|
||||
def _create_file_with_temp_content(self, content=""):
|
||||
handler, name = tempfile.mkstemp()
|
||||
self._temp_files.append(name)
|
||||
os.write(handler, str.encode(content))
|
||||
os.close(handler)
|
||||
return name
|
||||
|
||||
def get_test_loader(self,
|
||||
token_filename=None,
|
||||
cert_filename=None,
|
||||
environ=_TEST_ENVIRON):
|
||||
if not token_filename:
|
||||
token_filename = self._create_file_with_temp_content(_TEST_TOKEN)
|
||||
if not cert_filename:
|
||||
cert_filename = self._create_file_with_temp_content(_TEST_CERT)
|
||||
return InClusterConfigLoader(token_filename=token_filename,
|
||||
cert_filename=cert_filename,
|
||||
try_refresh_token=True,
|
||||
environ=environ)
|
||||
|
||||
def test_join_host_port(self):
|
||||
self.assertEqual(_TEST_HOST_PORT,
|
||||
_join_host_port(_TEST_HOST, _TEST_PORT))
|
||||
self.assertEqual(_TEST_IPV6_HOST_PORT,
|
||||
_join_host_port(_TEST_IPV6_HOST, _TEST_PORT))
|
||||
|
||||
def test_load_config(self):
|
||||
cert_filename = self._create_file_with_temp_content(_TEST_CERT)
|
||||
loader = self.get_test_loader(cert_filename=cert_filename)
|
||||
loader._load_config()
|
||||
self.assertEqual("https://" + _TEST_HOST_PORT, loader.host)
|
||||
self.assertEqual(cert_filename, loader.ssl_ca_cert)
|
||||
self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)
|
||||
|
||||
def test_refresh_token(self):
|
||||
loader = self.get_test_loader()
|
||||
config = Configuration()
|
||||
loader.load_and_set(config)
|
||||
|
||||
self.assertEqual('bearer ' + _TEST_TOKEN,
|
||||
config.get_api_key_with_prefix('authorization'))
|
||||
self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)
|
||||
self.assertIsNotNone(loader.token_expires_at)
|
||||
|
||||
old_token = loader.token
|
||||
old_token_expires_at = loader.token_expires_at
|
||||
loader._token_filename = self._create_file_with_temp_content(
|
||||
_TEST_NEW_TOKEN)
|
||||
self.assertEqual('bearer ' + _TEST_TOKEN,
|
||||
config.get_api_key_with_prefix('authorization'))
|
||||
|
||||
loader.token_expires_at = datetime.datetime.now()
|
||||
self.assertEqual('bearer ' + _TEST_NEW_TOKEN,
|
||||
config.get_api_key_with_prefix('authorization'))
|
||||
self.assertEqual('bearer ' + _TEST_NEW_TOKEN, loader.token)
|
||||
self.assertGreater(loader.token_expires_at, old_token_expires_at)
|
||||
|
||||
def _should_fail_load(self, config_loader, reason):
|
||||
try:
|
||||
config_loader.load_and_set()
|
||||
self.fail("Should fail because %s" % reason)
|
||||
except ConfigException:
|
||||
# expected
|
||||
pass
|
||||
|
||||
def test_no_port(self):
|
||||
loader = self.get_test_loader(
|
||||
environ={SERVICE_HOST_ENV_NAME: _TEST_HOST})
|
||||
self._should_fail_load(loader, "no port specified")
|
||||
|
||||
def test_empty_port(self):
|
||||
loader = self.get_test_loader(environ={
|
||||
SERVICE_HOST_ENV_NAME: _TEST_HOST,
|
||||
SERVICE_PORT_ENV_NAME: ""
|
||||
})
|
||||
self._should_fail_load(loader, "empty port specified")
|
||||
|
||||
def test_no_host(self):
|
||||
loader = self.get_test_loader(
|
||||
environ={SERVICE_PORT_ENV_NAME: _TEST_PORT})
|
||||
self._should_fail_load(loader, "no host specified")
|
||||
|
||||
def test_empty_host(self):
|
||||
loader = self.get_test_loader(environ={
|
||||
SERVICE_HOST_ENV_NAME: "",
|
||||
SERVICE_PORT_ENV_NAME: _TEST_PORT
|
||||
})
|
||||
self._should_fail_load(loader, "empty host specified")
|
||||
|
||||
def test_no_cert_file(self):
|
||||
loader = self.get_test_loader(cert_filename="not_exists_file_1123")
|
||||
self._should_fail_load(loader, "cert file does not exist")
|
||||
|
||||
def test_empty_cert_file(self):
|
||||
loader = self.get_test_loader(
|
||||
cert_filename=self._create_file_with_temp_content())
|
||||
self._should_fail_load(loader, "empty cert file provided")
|
||||
|
||||
def test_no_token_file(self):
|
||||
loader = self.get_test_loader(token_filename="not_exists_file_1123")
|
||||
self._should_fail_load(loader, "token file does not exist")
|
||||
|
||||
def test_empty_token_file(self):
|
||||
loader = self.get_test_loader(
|
||||
token_filename=self._create_file_with_temp_content())
|
||||
self._should_fail_load(loader, "empty token file provided")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
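# Illustrative usage note (not part of the upstream test module): in application code
# the loader exercised above is normally driven through the package-level helper, for
# example:
#
#     from kubernetes import client, config
#     config.load_incluster_config()   # reads the service-account token and CA from the pod
#     pods = client.CoreV1Api().list_pod_for_all_namespaces()
#
# The tests above construct InClusterConfigLoader directly with temporary files so the
# token-refresh and error paths can be checked without a real cluster.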
|
901 kubernetes/base/config/kube_config.py Normal file
@@ -0,0 +1,901 @@
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import atexit
|
||||
import base64
|
||||
import copy
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
from collections import namedtuple
|
||||
|
||||
import google.auth
|
||||
import google.auth.transport.requests
|
||||
import oauthlib.oauth2
|
||||
import urllib3
|
||||
import yaml
|
||||
from requests_oauthlib import OAuth2Session
|
||||
from six import PY3
|
||||
|
||||
from kubernetes.client import ApiClient, Configuration
|
||||
from kubernetes.config.exec_provider import ExecProvider
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .dateutil import UTC, format_rfc3339, parse_rfc3339
|
||||
|
||||
try:
|
||||
import adal
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5)
|
||||
KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config')
|
||||
ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':'
|
||||
_temp_files = {}
|
||||
|
||||
|
||||
def _cleanup_temp_files():
|
||||
global _temp_files
|
||||
for temp_file in _temp_files.values():
|
||||
try:
|
||||
os.remove(temp_file)
|
||||
except OSError:
|
||||
pass
|
||||
_temp_files = {}
|
||||
|
||||
|
||||
def _create_temp_file_with_content(content, temp_file_path=None):
|
||||
if len(_temp_files) == 0:
|
||||
atexit.register(_cleanup_temp_files)
|
||||
# Because we may change context several times, try to remember files we
|
||||
# created and reuse them at a small memory cost.
|
||||
content_key = str(content)
|
||||
if content_key in _temp_files:
|
||||
return _temp_files[content_key]
|
||||
if temp_file_path and not os.path.isdir(temp_file_path):
|
||||
os.makedirs(name=temp_file_path)
|
||||
fd, name = tempfile.mkstemp(dir=temp_file_path)
|
||||
os.close(fd)
|
||||
_temp_files[content_key] = name
|
||||
with open(name, 'wb') as fd:
|
||||
fd.write(content.encode() if isinstance(content, str) else content)
|
||||
return name
|
||||
|
||||
|
||||
def _is_expired(expiry):
|
||||
return ((parse_rfc3339(expiry) - EXPIRY_SKEW_PREVENTION_DELAY) <=
|
||||
datetime.datetime.now(tz=UTC))
|
||||
|
||||
|
||||
class FileOrData(object):
|
||||
"""Utility class to read content of obj[%data_key_name] or file's
|
||||
content of obj[%file_key_name] and represent it as file or data.
|
||||
Note that the data is preferred. The obj[%file_key_name] will be used iff
|
||||
obj['%data_key_name'] is not set or empty. Assumption is file content is
|
||||
raw data and data field is base64 string. The assumption can be changed
|
||||
with the base64_file_content flag. If set to False, the content of the file is
assumed to already be base64 and is read as is. The default True value will
result in the file content being base64-encoded after it is read."""
|
||||
|
||||
def __init__(self, obj, file_key_name, data_key_name=None,
|
||||
file_base_path="", base64_file_content=True,
|
||||
temp_file_path=None):
|
||||
if not data_key_name:
|
||||
data_key_name = file_key_name + "-data"
|
||||
self._file = None
|
||||
self._data = None
|
||||
self._base64_file_content = base64_file_content
|
||||
self._temp_file_path = temp_file_path
|
||||
if not obj:
|
||||
return
|
||||
if data_key_name in obj:
|
||||
self._data = obj[data_key_name]
|
||||
elif file_key_name in obj:
|
||||
self._file = os.path.normpath(
|
||||
os.path.join(file_base_path, obj[file_key_name]))
|
||||
|
||||
def as_file(self):
|
||||
"""If obj[%data_key_name] exists, return name of a file with base64
|
||||
decoded obj[%data_key_name] content otherwise obj[%file_key_name]."""
|
||||
use_data_if_no_file = not self._file and self._data
|
||||
if use_data_if_no_file:
|
||||
if self._base64_file_content:
|
||||
if isinstance(self._data, str):
|
||||
content = self._data.encode()
|
||||
else:
|
||||
content = self._data
|
||||
self._file = _create_temp_file_with_content(
|
||||
base64.standard_b64decode(content), self._temp_file_path)
|
||||
else:
|
||||
self._file = _create_temp_file_with_content(
|
||||
self._data, self._temp_file_path)
|
||||
if self._file and not os.path.isfile(self._file):
|
||||
raise ConfigException("File does not exist: %s" % self._file)
|
||||
return self._file
|
||||
|
||||
def as_data(self):
|
||||
"""If obj[%data_key_name] exists, Return obj[%data_key_name] otherwise
|
||||
base64 encoded string of obj[%file_key_name] file content."""
|
||||
use_file_if_no_data = not self._data and self._file
|
||||
if use_file_if_no_data:
|
||||
with open(self._file) as f:
|
||||
if self._base64_file_content:
|
||||
self._data = bytes.decode(
|
||||
base64.standard_b64encode(str.encode(f.read())))
|
||||
else:
|
||||
self._data = f.read()
|
||||
return self._data
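# Illustrative sketch (not part of the upstream module): FileOrData prefers the inline
# base64 "*-data" key over the plain file-path key, materialising the decoded bytes as
# a temporary file. For example, for a kubeconfig user entry:
#
#     user = {'client-certificate': '/tmp/cert.pem',
#             'client-certificate-data': base64.standard_b64encode(b'PEM bytes').decode()}
#     cert_path = FileOrData(user, 'client-certificate').as_file()
#     # cert_path points at a temp file holding the decoded inline data, because the
#     # "-data" variant wins whenever it is present and non-empty.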
|
||||
|
||||
|
||||
class CommandTokenSource(object):
|
||||
def __init__(self, cmd, args, tokenKey, expiryKey):
|
||||
self._cmd = cmd
|
||||
self._args = args
|
||||
if not tokenKey:
|
||||
self._tokenKey = '{.access_token}'
|
||||
else:
|
||||
self._tokenKey = tokenKey
|
||||
if not expiryKey:
|
||||
self._expiryKey = '{.token_expiry}'
|
||||
else:
|
||||
self._expiryKey = expiryKey
|
||||
|
||||
def token(self):
|
||||
fullCmd = self._cmd + (" ") + " ".join(self._args)
|
||||
process = subprocess.Popen(
|
||||
[self._cmd] + self._args,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
universal_newlines=True)
|
||||
(stdout, stderr) = process.communicate()
|
||||
exit_code = process.wait()
|
||||
if exit_code != 0:
|
||||
msg = 'cmd-path: process returned %d' % exit_code
|
||||
msg += "\nCmd: %s" % fullCmd
|
||||
stderr = stderr.strip()
|
||||
if stderr:
|
||||
msg += '\nStderr: %s' % stderr
|
||||
raise ConfigException(msg)
|
||||
try:
|
||||
data = json.loads(stdout)
|
||||
except ValueError as de:
|
||||
raise ConfigException(
|
||||
'exec: failed to decode process output: %s' % de)
|
||||
A = namedtuple('A', ['token', 'expiry'])
|
||||
return A(
|
||||
token=data['credential']['access_token'],
|
||||
expiry=parse_rfc3339(data['credential']['token_expiry']))
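# Illustrative kubeconfig fragment (an assumption about typical use, not taken from
# this repository) that would route token refresh through CommandTokenSource via the
# gcp auth-provider's cmd-path/cmd-args settings:
#
#     users:
#     - name: gke-user
#       user:
#         auth-provider:
#           name: gcp
#           config:
#             cmd-path: gcloud
#             cmd-args: config config-helper --format=json
#             token-key: '{.credential.access_token}'
#             expiry-key: '{.credential.token_expiry}'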
|
||||
|
||||
|
||||
class KubeConfigLoader(object):
|
||||
|
||||
def __init__(self, config_dict, active_context=None,
|
||||
get_google_credentials=None,
|
||||
config_base_path="",
|
||||
config_persister=None,
|
||||
temp_file_path=None):
|
||||
|
||||
if config_dict is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config. '
|
||||
'Expected config_dict to not be None.')
|
||||
elif isinstance(config_dict, ConfigNode):
|
||||
self._config = config_dict
|
||||
else:
|
||||
self._config = ConfigNode('kube-config', config_dict)
|
||||
|
||||
self._current_context = None
|
||||
self._user = None
|
||||
self._cluster = None
|
||||
self.set_active_context(active_context)
|
||||
self._config_base_path = config_base_path
|
||||
self._config_persister = config_persister
|
||||
self._temp_file_path = temp_file_path
|
||||
|
||||
def _refresh_credentials_with_cmd_path():
|
||||
config = self._user['auth-provider']['config']
|
||||
cmd = config['cmd-path']
|
||||
if len(cmd) == 0:
|
||||
raise ConfigException(
|
||||
'missing access token cmd '
|
||||
'(cmd-path is an empty string in your kubeconfig file)')
|
||||
if 'scopes' in config and config['scopes'] != "":
|
||||
raise ConfigException(
|
||||
'scopes can only be used '
|
||||
'when kubectl is using a gcp service account key')
|
||||
args = []
|
||||
if 'cmd-args' in config:
|
||||
args = config['cmd-args'].split()
|
||||
else:
|
||||
fields = config['cmd-path'].split()
|
||||
cmd = fields[0]
|
||||
args = fields[1:]
|
||||
|
||||
commandTokenSource = CommandTokenSource(
|
||||
cmd, args,
|
||||
config.safe_get('token-key'),
|
||||
config.safe_get('expiry-key'))
|
||||
return commandTokenSource.token()
|
||||
|
||||
def _refresh_credentials():
|
||||
# Refresh credentials using cmd-path
|
||||
if ('auth-provider' in self._user and
|
||||
'config' in self._user['auth-provider'] and
|
||||
'cmd-path' in self._user['auth-provider']['config']):
|
||||
return _refresh_credentials_with_cmd_path()
|
||||
|
||||
credentials, project_id = google.auth.default(scopes=[
|
||||
'https://www.googleapis.com/auth/cloud-platform',
|
||||
'https://www.googleapis.com/auth/userinfo.email'
|
||||
])
|
||||
request = google.auth.transport.requests.Request()
|
||||
credentials.refresh(request)
|
||||
return credentials
|
||||
|
||||
if get_google_credentials:
|
||||
self._get_google_credentials = get_google_credentials
|
||||
else:
|
||||
self._get_google_credentials = _refresh_credentials
|
||||
|
||||
def set_active_context(self, context_name=None):
|
||||
if context_name is None:
|
||||
context_name = self._config['current-context']
|
||||
self._current_context = self._config['contexts'].get_with_name(
|
||||
context_name)
|
||||
if (self._current_context['context'].safe_get('user') and
|
||||
self._config.safe_get('users')):
|
||||
user = self._config['users'].get_with_name(
|
||||
self._current_context['context']['user'], safe=True)
|
||||
if user:
|
||||
self._user = user['user']
|
||||
else:
|
||||
self._user = None
|
||||
else:
|
||||
self._user = None
|
||||
self._cluster = self._config['clusters'].get_with_name(
|
||||
self._current_context['context']['cluster'])['cluster']
|
||||
|
||||
def _load_authentication(self):
|
||||
"""Read authentication from kube-config user section if exists.
|
||||
|
||||
This function goes through various authentication methods in user
|
||||
section of kube-config and stops if it finds a valid authentication
|
||||
method. The order of authentication methods is:
|
||||
|
||||
1. auth-provider (gcp, azure, oidc)
|
||||
2. token field (point to a token file)
|
||||
3. exec provided plugin
|
||||
4. username/password
|
||||
"""
|
||||
if not self._user:
|
||||
return
|
||||
if self._load_auth_provider_token():
|
||||
return
|
||||
if self._load_user_token():
|
||||
return
|
||||
if self._load_from_exec_plugin():
|
||||
return
|
||||
self._load_user_pass_token()
|
||||
|
||||
def _load_auth_provider_token(self):
|
||||
if 'auth-provider' not in self._user:
|
||||
return
|
||||
provider = self._user['auth-provider']
|
||||
if 'name' not in provider:
|
||||
return
|
||||
if provider['name'] == 'gcp':
|
||||
return self._load_gcp_token(provider)
|
||||
if provider['name'] == 'azure':
|
||||
return self._load_azure_token(provider)
|
||||
if provider['name'] == 'oidc':
|
||||
return self._load_oid_token(provider)
|
||||
|
||||
def _azure_is_expired(self, provider):
|
||||
expires_on = provider['config']['expires-on']
|
||||
if expires_on.isdigit():
|
||||
return int(expires_on) < time.time()
|
||||
else:
|
||||
exp_time = time.strptime(expires_on, '%Y-%m-%d %H:%M:%S.%f')
|
||||
return exp_time < time.gmtime()
|
||||
|
||||
def _load_azure_token(self, provider):
|
||||
if 'config' not in provider:
|
||||
return
|
||||
if 'access-token' not in provider['config']:
|
||||
return
|
||||
if 'expires-on' in provider['config']:
|
||||
if self._azure_is_expired(provider):
|
||||
self._refresh_azure_token(provider['config'])
|
||||
self.token = 'Bearer %s' % provider['config']['access-token']
|
||||
return self.token
|
||||
|
||||
def _refresh_azure_token(self, config):
|
||||
if 'adal' not in globals():
|
||||
raise ImportError('refresh token error, adal library not imported')
|
||||
|
||||
tenant = config['tenant-id']
|
||||
authority = 'https://login.microsoftonline.com/{}'.format(tenant)
|
||||
context = adal.AuthenticationContext(
|
||||
authority, validate_authority=True, api_version='1.0'
|
||||
)
|
||||
refresh_token = config['refresh-token']
|
||||
client_id = config['client-id']
|
||||
apiserver_id = '00000002-0000-0000-c000-000000000000'
|
||||
try:
|
||||
apiserver_id = config['apiserver-id']
|
||||
except ConfigException:
|
||||
# We've already set a default above
|
||||
pass
|
||||
token_response = context.acquire_token_with_refresh_token(
|
||||
refresh_token, client_id, apiserver_id)
|
||||
|
||||
provider = self._user['auth-provider']['config']
|
||||
provider.value['access-token'] = token_response['accessToken']
|
||||
provider.value['expires-on'] = token_response['expiresOn']
|
||||
if self._config_persister:
|
||||
self._config_persister()
|
||||
|
||||
def _load_gcp_token(self, provider):
|
||||
if (('config' not in provider) or
|
||||
('access-token' not in provider['config']) or
|
||||
('expiry' in provider['config'] and
|
||||
_is_expired(provider['config']['expiry']))):
|
||||
# token is not available or expired, refresh it
|
||||
self._refresh_gcp_token()
|
||||
|
||||
self.token = "Bearer %s" % provider['config']['access-token']
|
||||
if 'expiry' in provider['config']:
|
||||
self.expiry = parse_rfc3339(provider['config']['expiry'])
|
||||
return self.token
|
||||
|
||||
def _refresh_gcp_token(self):
|
||||
if 'config' not in self._user['auth-provider']:
|
||||
self._user['auth-provider'].value['config'] = {}
|
||||
provider = self._user['auth-provider']['config']
|
||||
credentials = self._get_google_credentials()
|
||||
provider.value['access-token'] = credentials.token
|
||||
provider.value['expiry'] = format_rfc3339(credentials.expiry)
|
||||
if self._config_persister:
|
||||
self._config_persister()
|
||||
|
||||
def _load_oid_token(self, provider):
|
||||
if 'config' not in provider:
|
||||
return
|
||||
|
||||
reserved_characters = frozenset(["=", "+", "/"])
|
||||
token = provider['config']['id-token']
|
||||
|
||||
if any(char in token for char in reserved_characters):
|
||||
# Invalid jwt, as it contains url-unsafe chars
|
||||
return
|
||||
|
||||
parts = token.split('.')
|
||||
if len(parts) != 3: # Not a valid JWT
|
||||
return
|
||||
|
||||
padding = (4 - len(parts[1]) % 4) * '='
|
||||
if len(padding) == 3:
|
||||
# According to spec, 3 padding characters cannot occur
|
||||
# in a valid jwt
|
||||
# https://tools.ietf.org/html/rfc7515#appendix-C
|
||||
return
|
||||
|
||||
if PY3:
|
||||
jwt_attributes = json.loads(
|
||||
base64.urlsafe_b64decode(parts[1] + padding).decode('utf-8')
|
||||
)
|
||||
else:
|
||||
jwt_attributes = json.loads(
|
||||
base64.b64decode(parts[1] + padding)
|
||||
)
|
||||
|
||||
expire = jwt_attributes.get('exp')
|
||||
|
||||
if ((expire is not None) and
|
||||
(_is_expired(datetime.datetime.fromtimestamp(expire,
|
||||
tz=UTC)))):
|
||||
self._refresh_oidc(provider)
|
||||
|
||||
if self._config_persister:
|
||||
self._config_persister()
|
||||
|
||||
self.token = "Bearer %s" % provider['config']['id-token']
|
||||
|
||||
return self.token
|
||||
|
||||
def _refresh_oidc(self, provider):
|
||||
config = Configuration()
|
||||
|
||||
if 'idp-certificate-authority-data' in provider['config']:
|
||||
ca_cert = tempfile.NamedTemporaryFile(delete=True)
|
||||
|
||||
if PY3:
|
||||
cert = base64.b64decode(
|
||||
provider['config']['idp-certificate-authority-data']
|
||||
).decode('utf-8')
|
||||
else:
|
||||
cert = base64.b64decode(
|
||||
provider['config']['idp-certificate-authority-data'] + "=="
|
||||
)
|
||||
|
||||
with open(ca_cert.name, 'w') as fh:
|
||||
fh.write(cert)
|
||||
|
||||
config.ssl_ca_cert = ca_cert.name
|
||||
|
||||
elif 'idp-certificate-authority' in provider['config']:
|
||||
config.ssl_ca_cert = provider['config']['idp-certificate-authority']
|
||||
|
||||
else:
|
||||
config.verify_ssl = False
|
||||
|
||||
client = ApiClient(configuration=config)
|
||||
|
||||
response = client.request(
|
||||
method="GET",
|
||||
url="%s/.well-known/openid-configuration"
|
||||
% provider['config']['idp-issuer-url']
|
||||
)
|
||||
|
||||
if response.status != 200:
|
||||
return
|
||||
|
||||
response = json.loads(response.data)
|
||||
|
||||
request = OAuth2Session(
|
||||
client_id=provider['config']['client-id'],
|
||||
token=provider['config']['refresh-token'],
|
||||
auto_refresh_kwargs={
|
||||
'client_id': provider['config']['client-id'],
|
||||
'client_secret': provider['config']['client-secret']
|
||||
},
|
||||
auto_refresh_url=response['token_endpoint']
|
||||
)
|
||||
|
||||
try:
|
||||
refresh = request.refresh_token(
|
||||
token_url=response['token_endpoint'],
|
||||
refresh_token=provider['config']['refresh-token'],
|
||||
auth=(provider['config']['client-id'],
|
||||
provider['config']['client-secret']),
|
||||
verify=config.ssl_ca_cert if config.verify_ssl else None
|
||||
)
|
||||
except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError:
|
||||
return
|
||||
|
||||
provider['config'].value['id-token'] = refresh['id_token']
|
||||
provider['config'].value['refresh-token'] = refresh['refresh_token']
|
||||
|
||||
def _load_from_exec_plugin(self):
|
||||
if 'exec' not in self._user:
|
||||
return
|
||||
try:
|
||||
base_path = self._get_base_path(self._cluster.path)
|
||||
status = ExecProvider(self._user['exec'], base_path, self._cluster).run()
|
||||
if 'token' in status:
|
||||
self.token = "Bearer %s" % status['token']
|
||||
elif 'clientCertificateData' in status:
|
||||
# https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats
|
||||
# Plugin has provided certificates instead of a token.
|
||||
if 'clientKeyData' not in status:
|
||||
logging.error('exec: missing clientKeyData field in '
|
||||
'plugin output')
|
||||
return None
|
||||
self.cert_file = FileOrData(
|
||||
status, None,
|
||||
data_key_name='clientCertificateData',
|
||||
file_base_path=base_path,
|
||||
base64_file_content=False,
|
||||
temp_file_path=self._temp_file_path).as_file()
|
||||
self.key_file = FileOrData(
|
||||
status, None,
|
||||
data_key_name='clientKeyData',
|
||||
file_base_path=base_path,
|
||||
base64_file_content=False,
|
||||
temp_file_path=self._temp_file_path).as_file()
|
||||
else:
|
||||
logging.error('exec: missing token or clientCertificateData '
|
||||
'field in plugin output')
|
||||
return None
|
||||
if 'expirationTimestamp' in status:
|
||||
self.expiry = parse_rfc3339(status['expirationTimestamp'])
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.error(str(e))
|
||||
|
||||
def _load_user_token(self):
|
||||
base_path = self._get_base_path(self._user.path)
|
||||
token = FileOrData(
|
||||
self._user, 'tokenFile', 'token',
|
||||
file_base_path=base_path,
|
||||
base64_file_content=False,
|
||||
temp_file_path=self._temp_file_path).as_data()
|
||||
if token:
|
||||
self.token = "Bearer %s" % token
|
||||
return True
|
||||
|
||||
def _load_user_pass_token(self):
|
||||
if 'username' in self._user and 'password' in self._user:
|
||||
self.token = urllib3.util.make_headers(
|
||||
basic_auth=(self._user['username'] + ':' +
|
||||
self._user['password'])).get('authorization')
|
||||
return True
|
||||
|
||||
def _get_base_path(self, config_path):
|
||||
if self._config_base_path is not None:
|
||||
return self._config_base_path
|
||||
if config_path is not None:
|
||||
return os.path.abspath(os.path.dirname(config_path))
|
||||
return ""
|
||||
|
||||
def _load_cluster_info(self):
|
||||
if 'server' in self._cluster:
|
||||
self.host = self._cluster['server'].rstrip('/')
|
||||
if self.host.startswith("https"):
|
||||
base_path = self._get_base_path(self._cluster.path)
|
||||
self.ssl_ca_cert = FileOrData(
|
||||
self._cluster, 'certificate-authority',
|
||||
file_base_path=base_path,
|
||||
temp_file_path=self._temp_file_path).as_file()
|
||||
if 'cert_file' not in self.__dict__:
|
||||
# cert_file could have been provided by
|
||||
# _load_from_exec_plugin; only load from the _user
|
||||
# section if we need it.
|
||||
self.cert_file = FileOrData(
|
||||
self._user, 'client-certificate',
|
||||
file_base_path=base_path,
|
||||
temp_file_path=self._temp_file_path).as_file()
|
||||
self.key_file = FileOrData(
|
||||
self._user, 'client-key',
|
||||
file_base_path=base_path,
|
||||
temp_file_path=self._temp_file_path).as_file()
|
||||
if 'insecure-skip-tls-verify' in self._cluster:
|
||||
self.verify_ssl = not self._cluster['insecure-skip-tls-verify']
|
||||
if 'tls-server-name' in self._cluster:
|
||||
self.tls_server_name = self._cluster['tls-server-name']
|
||||
|
||||
def _set_config(self, client_configuration):
|
||||
if 'token' in self.__dict__:
|
||||
client_configuration.api_key['authorization'] = self.token
|
||||
|
||||
def _refresh_api_key(client_configuration):
|
||||
if ('expiry' in self.__dict__ and _is_expired(self.expiry)):
|
||||
self._load_authentication()
|
||||
self._set_config(client_configuration)
|
||||
client_configuration.refresh_api_key_hook = _refresh_api_key
|
||||
# copy these keys directly from self to configuration object
|
||||
keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl', 'tls_server_name']
|
||||
for key in keys:
|
||||
if key in self.__dict__:
|
||||
setattr(client_configuration, key, getattr(self, key))
|
||||
|
||||
def load_and_set(self, client_configuration):
|
||||
self._load_authentication()
|
||||
self._load_cluster_info()
|
||||
self._set_config(client_configuration)
|
||||
|
||||
def list_contexts(self):
|
||||
return [context.value for context in self._config['contexts']]
|
||||
|
||||
@property
|
||||
def current_context(self):
|
||||
return self._current_context.value
|
||||
|
||||
|
||||
class ConfigNode(object):
|
||||
"""Remembers each config key's path and construct a relevant exception
|
||||
message in case of missing keys. The assumption is all access keys are
|
||||
present in a well-formed kube-config."""
|
||||
|
||||
def __init__(self, name, value, path=None):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.path = path
|
||||
|
||||
def __contains__(self, key):
|
||||
return key in self.value
|
||||
|
||||
def __len__(self):
|
||||
return len(self.value)
|
||||
|
||||
def safe_get(self, key):
|
||||
if (isinstance(self.value, list) and isinstance(key, int) or
|
||||
key in self.value):
|
||||
return self.value[key]
|
||||
|
||||
def __getitem__(self, key):
|
||||
v = self.safe_get(key)
|
||||
if v is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. Expected key %s in %s'
|
||||
% (key, self.name))
|
||||
if isinstance(v, dict) or isinstance(v, list):
|
||||
return ConfigNode('%s/%s' % (self.name, key), v, self.path)
|
||||
else:
|
||||
return v
|
||||
|
||||
def get_with_name(self, name, safe=False):
|
||||
if not isinstance(self.value, list):
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. Expected %s to be a list'
|
||||
% self.name)
|
||||
result = None
|
||||
for v in self.value:
|
||||
if 'name' not in v:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. '
|
||||
'Expected all values in %s list to have \'name\' key'
|
||||
% self.name)
|
||||
if v['name'] == name:
|
||||
if result is None:
|
||||
result = v
|
||||
else:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. '
|
||||
'Expected only one object with name %s in %s list'
|
||||
% (name, self.name))
|
||||
if result is not None:
|
||||
if isinstance(result, ConfigNode):
|
||||
return result
|
||||
else:
|
||||
return ConfigNode(
|
||||
'%s[name=%s]' %
|
||||
(self.name, name), result, self.path)
|
||||
if safe:
|
||||
return None
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. '
|
||||
'Expected object with name %s in %s list' % (name, self.name))
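# Illustrative sketch (not part of the upstream module): ConfigNode wraps the parsed
# kubeconfig dict so that a missing key fails with a descriptive path instead of a
# bare KeyError:
#
#     node = ConfigNode('kube-config', {'clusters': [{'name': 'a', 'cluster': {}}]})
#     node['clusters'].get_with_name('a')   # ConfigNode for the named list entry
#     node['contexts']                      # raises ConfigException naming 'kube-config'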
|
||||
|
||||
|
||||
class KubeConfigMerger:
|
||||
|
||||
"""Reads and merges configuration from one or more kube-config's.
|
||||
The property `config` can be passed to the KubeConfigLoader as config_dict.
|
||||
|
||||
It uses a path attribute from ConfigNode to store the path to kubeconfig.
|
||||
This path is required to load certs from relative paths.
|
||||
|
||||
A method `save_changes` writes back any kubeconfig file whose in-memory dict has
changed (it compares the current state of each dict with the copy saved at load
time).
|
||||
"""
|
||||
|
||||
def __init__(self, paths):
|
||||
self.paths = []
|
||||
self.config_files = {}
|
||||
self.config_merged = None
|
||||
if hasattr(paths, 'read'):
|
||||
self._load_config_from_file_like_object(paths)
|
||||
else:
|
||||
self._load_config_from_file_path(paths)
|
||||
|
||||
@property
|
||||
def config(self):
|
||||
return self.config_merged
|
||||
|
||||
def _load_config_from_file_like_object(self, string):
|
||||
if hasattr(string, 'getvalue'):
|
||||
config = yaml.safe_load(string.getvalue())
|
||||
else:
|
||||
config = yaml.safe_load(string.read())
|
||||
|
||||
if config is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config.')
|
||||
if self.config_merged is None:
|
||||
self.config_merged = copy.deepcopy(config)
|
||||
# doesn't need to do any further merging
|
||||
|
||||
def _load_config_from_file_path(self, string):
|
||||
for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR):
|
||||
if path:
|
||||
path = os.path.expanduser(path)
|
||||
if os.path.exists(path):
|
||||
self.paths.append(path)
|
||||
self.load_config(path)
|
||||
self.config_saved = copy.deepcopy(self.config_files)
|
||||
|
||||
def load_config(self, path):
|
||||
with open(path) as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
if config is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config. '
|
||||
'%s file is empty' % path)
|
||||
|
||||
if self.config_merged is None:
|
||||
config_merged = copy.deepcopy(config)
|
||||
for item in ('clusters', 'contexts', 'users'):
|
||||
config_merged[item] = []
|
||||
self.config_merged = ConfigNode(path, config_merged, path)
|
||||
for item in ('clusters', 'contexts', 'users'):
|
||||
self._merge(item, config.get(item, []) or [], path)
|
||||
|
||||
if 'current-context' in config:
|
||||
self.config_merged.value['current-context'] = config['current-context']
|
||||
|
||||
self.config_files[path] = config
|
||||
|
||||
def _merge(self, item, add_cfg, path):
|
||||
for new_item in add_cfg:
|
||||
for exists in self.config_merged.value[item]:
|
||||
if exists['name'] == new_item['name']:
|
||||
break
|
||||
else:
|
||||
self.config_merged.value[item].append(ConfigNode(
|
||||
'{}/{}'.format(path, new_item), new_item, path))
|
||||
|
||||
def save_changes(self):
|
||||
for path in self.paths:
|
||||
if self.config_saved[path] != self.config_files[path]:
|
||||
self.save_config(path)
|
||||
self.config_saved = copy.deepcopy(self.config_files)
|
||||
|
||||
def save_config(self, path):
|
||||
with open(path, 'w') as f:
|
||||
yaml.safe_dump(self.config_files[path], f,
|
||||
default_flow_style=False)
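# Illustrative sketch (not part of the upstream module): KubeConfigMerger accepts one
# or more kubeconfig paths joined with ENV_KUBECONFIG_PATH_SEPARATOR and exposes the
# merged result as `config`, which KubeConfigLoader then takes as its config_dict:
#
#     merger = KubeConfigMerger('~/.kube/config:/tmp/extra-config')  # ';' on Windows
#     loader = KubeConfigLoader(config_dict=merger.config,
#                               config_persister=merger.save_changes)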
|
||||
|
||||
|
||||
def _get_kube_config_loader_for_yaml_file(
|
||||
filename, persist_config=False, **kwargs):
|
||||
return _get_kube_config_loader(
|
||||
filename=filename,
|
||||
persist_config=persist_config,
|
||||
**kwargs)
|
||||
|
||||
|
||||
def _get_kube_config_loader(
|
||||
filename=None,
|
||||
config_dict=None,
|
||||
persist_config=False,
|
||||
**kwargs):
|
||||
if config_dict is None:
|
||||
kcfg = KubeConfigMerger(filename)
|
||||
if persist_config and 'config_persister' not in kwargs:
|
||||
kwargs['config_persister'] = kcfg.save_changes
|
||||
|
||||
if kcfg.config is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config file. '
|
||||
'No configuration found.')
|
||||
return KubeConfigLoader(
|
||||
config_dict=kcfg.config,
|
||||
config_base_path=None,
|
||||
**kwargs)
|
||||
else:
|
||||
return KubeConfigLoader(
|
||||
config_dict=config_dict,
|
||||
config_base_path=None,
|
||||
**kwargs)
|
||||
|
||||
|
||||
def list_kube_config_contexts(config_file=None):
|
||||
|
||||
if config_file is None:
|
||||
config_file = KUBE_CONFIG_DEFAULT_LOCATION
|
||||
|
||||
loader = _get_kube_config_loader(filename=config_file)
|
||||
return loader.list_contexts(), loader.current_context
|
||||
|
||||
|
||||
def load_kube_config(config_file=None, context=None,
|
||||
client_configuration=None,
|
||||
persist_config=True,
|
||||
temp_file_path=None):
|
||||
"""Loads authentication and cluster information from kube-config file
|
||||
and stores them in kubernetes.client.configuration.
|
||||
|
||||
:param config_file: Name of the kube-config file.
|
||||
:param context: set the active context. If is set to None, current_context
|
||||
from config file will be used.
|
||||
:param client_configuration: The kubernetes.client.Configuration to
|
||||
set configs to.
|
||||
:param persist_config: If True, config file will be updated when changed
|
||||
(e.g GCP token refresh).
|
||||
:param temp_file_path: store temp files path.
|
||||
"""
|
||||
|
||||
if config_file is None:
|
||||
config_file = KUBE_CONFIG_DEFAULT_LOCATION
|
||||
|
||||
loader = _get_kube_config_loader(
|
||||
filename=config_file, active_context=context,
|
||||
persist_config=persist_config,
|
||||
temp_file_path=temp_file_path)
|
||||
|
||||
if client_configuration is None:
|
||||
config = type.__call__(Configuration)
|
||||
loader.load_and_set(config)
|
||||
Configuration.set_default(config)
|
||||
else:
|
||||
loader.load_and_set(client_configuration)
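# Illustrative usage (not part of the upstream module): typical application code calls
# this through the package-level helper and then builds API objects against the
# default configuration it has just populated:
#
#     from kubernetes import client, config
#     config.load_kube_config(context='my-cluster')  # defaults to $KUBECONFIG or ~/.kube/config
#     v1 = client.CoreV1Api()
#     print([ns.metadata.name for ns in v1.list_namespace().items])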
|
||||
|
||||
|
||||
def load_kube_config_from_dict(config_dict, context=None,
|
||||
client_configuration=None,
|
||||
persist_config=True,
|
||||
temp_file_path=None):
|
||||
"""Loads authentication and cluster information from config_dict file
|
||||
and stores them in kubernetes.client.configuration.
|
||||
|
||||
:param config_dict: Takes the config file as a dict.
|
||||
:param context: set the active context. If is set to None, current_context
|
||||
from config file will be used.
|
||||
:param client_configuration: The kubernetes.client.Configuration to
|
||||
set configs to.
|
||||
:param persist_config: If True, config file will be updated when changed
|
||||
(e.g GCP token refresh).
|
||||
:param temp_file_path: store temp files path.
|
||||
"""
|
||||
if config_dict is None:
|
||||
raise ConfigException(
|
||||
'Invalid kube-config dict. '
|
||||
'No configuration found.')
|
||||
|
||||
loader = _get_kube_config_loader(
|
||||
config_dict=config_dict, active_context=context,
|
||||
persist_config=persist_config,
|
||||
temp_file_path=temp_file_path)
|
||||
|
||||
if client_configuration is None:
|
||||
config = type.__call__(Configuration)
|
||||
loader.load_and_set(config)
|
||||
Configuration.set_default(config)
|
||||
else:
|
||||
loader.load_and_set(client_configuration)
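# Illustrative usage (not part of the upstream module): the dict variant is convenient
# when the kubeconfig is generated in memory rather than read from disk:
#
#     import yaml
#     from kubernetes import config
#     with open('/tmp/generated-kubeconfig') as f:
#         cfg = yaml.safe_load(f)
#     config.load_kube_config_from_dict(cfg)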
|
||||
|
||||
|
||||
def new_client_from_config(
|
||||
config_file=None,
|
||||
context=None,
|
||||
persist_config=True,
|
||||
client_configuration=None):
|
||||
"""
|
||||
Loads configuration the same as load_kube_config but returns an ApiClient
|
||||
to be used with any API object. This will allow the caller to concurrently
|
||||
talk with multiple clusters.
|
||||
"""
|
||||
if client_configuration is None:
|
||||
client_configuration = type.__call__(Configuration)
|
||||
load_kube_config(config_file=config_file, context=context,
|
||||
client_configuration=client_configuration,
|
||||
persist_config=persist_config)
|
||||
return ApiClient(configuration=client_configuration)
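# Illustrative usage (not part of the upstream module): because a dedicated ApiClient
# is returned, several clusters can be addressed side by side:
#
#     from kubernetes import client
#     api_a = new_client_from_config(context='cluster-a')
#     api_b = new_client_from_config(context='cluster-b')
#     pods_a = client.CoreV1Api(api_a).list_pod_for_all_namespaces()
#     pods_b = client.CoreV1Api(api_b).list_pod_for_all_namespaces()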
|
||||
|
||||
|
||||
def new_client_from_config_dict(
|
||||
config_dict=None,
|
||||
context=None,
|
||||
persist_config=True,
|
||||
temp_file_path=None,
|
||||
client_configuration=None):
|
||||
"""
|
||||
Loads configuration the same as load_kube_config_from_dict but returns an ApiClient
|
||||
to be used with any API object. This will allow the caller to concurrently
|
||||
talk with multiple clusters.
|
||||
"""
|
||||
if client_configuration is None:
|
||||
client_configuration = type.__call__(Configuration)
|
||||
load_kube_config_from_dict(config_dict=config_dict, context=context,
|
||||
client_configuration=client_configuration,
|
||||
persist_config=persist_config,
|
||||
temp_file_path=temp_file_path)
|
||||
return ApiClient(configuration=client_configuration)
|
1915 kubernetes/base/config/kube_config_test.py Normal file
File diff suppressed because it is too large
15 kubernetes/base/dynamic/__init__.py Normal file
@@ -0,0 +1,15 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from .client import * # NOQA
|
323 kubernetes/base/dynamic/client.py Normal file
@@ -0,0 +1,323 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import six
|
||||
import json
|
||||
|
||||
from kubernetes import watch
|
||||
from kubernetes.client.rest import ApiException
|
||||
|
||||
from .discovery import EagerDiscoverer, LazyDiscoverer
|
||||
from .exceptions import api_exception, KubernetesValidateMissing
|
||||
from .resource import Resource, ResourceList, Subresource, ResourceInstance, ResourceField
|
||||
|
||||
try:
|
||||
import kubernetes_validate
|
||||
HAS_KUBERNETES_VALIDATE = True
|
||||
except ImportError:
|
||||
HAS_KUBERNETES_VALIDATE = False
|
||||
|
||||
try:
|
||||
from kubernetes_validate.utils import VersionNotSupportedError
|
||||
except ImportError:
|
||||
class VersionNotSupportedError(NotImplementedError):
|
||||
pass
|
||||
|
||||
__all__ = [
|
||||
'DynamicClient',
|
||||
'ResourceInstance',
|
||||
'Resource',
|
||||
'ResourceList',
|
||||
'Subresource',
|
||||
'EagerDiscoverer',
|
||||
'LazyDiscoverer',
|
||||
'ResourceField',
|
||||
]
|
||||
|
||||
|
||||
def meta_request(func):
|
||||
""" Handles parsing response structure and translating API Exceptions """
|
||||
def inner(self, *args, **kwargs):
|
||||
serialize_response = kwargs.pop('serialize', True)
|
||||
serializer = kwargs.pop('serializer', ResourceInstance)
|
||||
try:
|
||||
resp = func(self, *args, **kwargs)
|
||||
except ApiException as e:
|
||||
raise api_exception(e)
|
||||
if serialize_response:
|
||||
try:
|
||||
if six.PY2:
|
||||
return serializer(self, json.loads(resp.data))
|
||||
return serializer(self, json.loads(resp.data.decode('utf8')))
|
||||
except ValueError:
|
||||
if six.PY2:
|
||||
return resp.data
|
||||
return resp.data.decode('utf8')
|
||||
return resp
|
||||
|
||||
return inner
|
||||
|
||||
|
||||
class DynamicClient(object):
|
||||
""" A kubernetes client that dynamically discovers and interacts with
|
||||
the kubernetes API
|
||||
"""
|
||||
|
||||
def __init__(self, client, cache_file=None, discoverer=None):
|
||||
# Setting default here to delay evaluation of LazyDiscoverer class
|
||||
# until constructor is called
|
||||
discoverer = discoverer or LazyDiscoverer
|
||||
|
||||
self.client = client
|
||||
self.configuration = client.configuration
|
||||
self.__discoverer = discoverer(self, cache_file)
|
||||
|
||||
@property
|
||||
def resources(self):
|
||||
return self.__discoverer
|
||||
|
||||
@property
|
||||
def version(self):
|
||||
return self.__discoverer.version
|
||||
|
||||
def ensure_namespace(self, resource, namespace, body):
|
||||
namespace = namespace or body.get('metadata', {}).get('namespace')
|
||||
if not namespace:
|
||||
raise ValueError("Namespace is required for {}.{}".format(resource.group_version, resource.kind))
|
||||
return namespace
|
||||
|
||||
def serialize_body(self, body):
|
||||
"""Serialize body to raw dict so apiserver can handle it
|
||||
|
||||
:param body: kubernetes resource body, current support: Union[Dict, ResourceInstance]
|
||||
"""
|
||||
# This should match any `ResourceInstance` instances
|
||||
if callable(getattr(body, 'to_dict', None)):
|
||||
return body.to_dict()
|
||||
return body or {}
|
||||
|
||||
def get(self, resource, name=None, namespace=None, **kwargs):
|
||||
path = resource.path(name=name, namespace=namespace)
|
||||
return self.request('get', path, **kwargs)
|
||||
|
||||
def create(self, resource, body=None, namespace=None, **kwargs):
|
||||
body = self.serialize_body(body)
|
||||
if resource.namespaced:
|
||||
namespace = self.ensure_namespace(resource, namespace, body)
|
||||
path = resource.path(namespace=namespace)
|
||||
return self.request('post', path, body=body, **kwargs)
|
||||
|
||||
def delete(self, resource, name=None, namespace=None, body=None, label_selector=None, field_selector=None, **kwargs):
|
||||
if not (name or label_selector or field_selector):
|
||||
raise ValueError("At least one of name|label_selector|field_selector is required")
|
||||
if resource.namespaced and not (label_selector or field_selector or namespace):
|
||||
raise ValueError("At least one of namespace|label_selector|field_selector is required")
|
||||
path = resource.path(name=name, namespace=namespace)
|
||||
return self.request('delete', path, body=body, label_selector=label_selector, field_selector=field_selector, **kwargs)
|
||||
|
||||
def replace(self, resource, body=None, name=None, namespace=None, **kwargs):
|
||||
body = self.serialize_body(body)
|
||||
name = name or body.get('metadata', {}).get('name')
|
||||
if not name:
|
||||
raise ValueError("name is required to replace {}.{}".format(resource.group_version, resource.kind))
|
||||
if resource.namespaced:
|
||||
namespace = self.ensure_namespace(resource, namespace, body)
|
||||
path = resource.path(name=name, namespace=namespace)
|
||||
return self.request('put', path, body=body, **kwargs)
|
||||
|
||||
def patch(self, resource, body=None, name=None, namespace=None, **kwargs):
|
||||
body = self.serialize_body(body)
|
||||
name = name or body.get('metadata', {}).get('name')
|
||||
if not name:
|
||||
raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind))
|
||||
if resource.namespaced:
|
||||
namespace = self.ensure_namespace(resource, namespace, body)
|
||||
|
||||
content_type = kwargs.pop('content_type', 'application/strategic-merge-patch+json')
|
||||
path = resource.path(name=name, namespace=namespace)
|
||||
|
||||
return self.request('patch', path, body=body, content_type=content_type, **kwargs)
|
||||
|
||||
def server_side_apply(self, resource, body=None, name=None, namespace=None, force_conflicts=None, **kwargs):
|
||||
body = self.serialize_body(body)
|
||||
name = name or body.get('metadata', {}).get('name')
|
||||
if not name:
|
||||
raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind))
|
||||
if resource.namespaced:
|
||||
namespace = self.ensure_namespace(resource, namespace, body)
|
||||
|
||||
# force content type to 'application/apply-patch+yaml'
|
||||
kwargs.update({'content_type': 'application/apply-patch+yaml'})
|
||||
path = resource.path(name=name, namespace=namespace)
|
||||
|
||||
return self.request('patch', path, body=body, force_conflicts=force_conflicts, **kwargs)
|
||||
|
||||
def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None, watcher=None):
|
||||
"""
|
||||
Stream events for a resource from the Kubernetes API
|
||||
|
||||
:param resource: The API resource object that will be used to query the API
|
||||
:param namespace: The namespace to query
|
||||
:param name: The name of the resource instance to query
|
||||
:param label_selector: The label selector with which to filter results
|
||||
:param field_selector: The field selector with which to filter results
|
||||
:param resource_version: The version with which to filter results. Only events with
|
||||
a resource_version greater than this value will be returned
|
||||
:param timeout: The amount of time in seconds to wait before terminating the stream
|
||||
:param watcher: The Watcher object that will be used to stream the resource
|
||||
|
||||
:return: Event object with these keys:
|
||||
'type': The type of event such as "ADDED", "DELETED", etc.
|
||||
'raw_object': a dict representing the watched object.
|
||||
'object': A ResourceInstance wrapping raw_object.
|
||||
|
||||
Example:
|
||||
client = DynamicClient(k8s_client)
|
||||
watcher = watch.Watch()
|
||||
v1_pods = client.resources.get(api_version='v1', kind='Pod')
|
||||
|
||||
for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5, watcher=watcher):
|
||||
print(e['type'])
|
||||
print(e['object'].metadata)
|
||||
# If you want to gracefully stop the stream watcher
|
||||
watcher.stop()
|
||||
"""
|
||||
if not watcher: watcher = watch.Watch()
|
||||
|
||||
# Use field selector to query for named instance so the watch parameter is handled properly.
|
||||
if name:
|
||||
field_selector = f"metadata.name={name}"
|
||||
|
||||
for event in watcher.stream(
|
||||
resource.get,
|
||||
namespace=namespace,
|
||||
field_selector=field_selector,
|
||||
label_selector=label_selector,
|
||||
resource_version=resource_version,
|
||||
serialize=False,
|
||||
timeout_seconds=timeout
|
||||
):
|
||||
event['object'] = ResourceInstance(resource, event['object'])
|
||||
yield event
|
||||
|
||||
@meta_request
|
||||
def request(self, method, path, body=None, **params):
|
||||
if not path.startswith('/'):
|
||||
path = '/' + path
|
||||
|
||||
path_params = params.get('path_params', {})
|
||||
query_params = params.get('query_params', [])
|
||||
if params.get('pretty') is not None:
|
||||
query_params.append(('pretty', params['pretty']))
|
||||
if params.get('_continue') is not None:
|
||||
query_params.append(('continue', params['_continue']))
|
||||
if params.get('include_uninitialized') is not None:
|
||||
query_params.append(('includeUninitialized', params['include_uninitialized']))
|
||||
if params.get('field_selector') is not None:
|
||||
query_params.append(('fieldSelector', params['field_selector']))
|
||||
if params.get('label_selector') is not None:
|
||||
query_params.append(('labelSelector', params['label_selector']))
|
||||
if params.get('limit') is not None:
|
||||
query_params.append(('limit', params['limit']))
|
||||
if params.get('resource_version') is not None:
|
||||
query_params.append(('resourceVersion', params['resource_version']))
|
||||
if params.get('timeout_seconds') is not None:
|
||||
query_params.append(('timeoutSeconds', params['timeout_seconds']))
|
||||
if params.get('watch') is not None:
|
||||
query_params.append(('watch', params['watch']))
|
||||
if params.get('grace_period_seconds') is not None:
|
||||
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
|
||||
if params.get('propagation_policy') is not None:
|
||||
query_params.append(('propagationPolicy', params['propagation_policy']))
|
||||
if params.get('orphan_dependents') is not None:
|
||||
query_params.append(('orphanDependents', params['orphan_dependents']))
|
||||
if params.get('dry_run') is not None:
|
||||
query_params.append(('dryRun', params['dry_run']))
|
||||
if params.get('field_manager') is not None:
|
||||
query_params.append(('fieldManager', params['field_manager']))
|
||||
if params.get('force_conflicts') is not None:
|
||||
query_params.append(('force', params['force_conflicts']))
|
||||
|
||||
header_params = params.get('header_params', {})
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
# Checking Accept header.
|
||||
new_header_params = dict((key.lower(), value) for key, value in header_params.items())
|
||||
if not 'accept' in new_header_params:
|
||||
header_params['Accept'] = self.client.select_header_accept([
|
||||
'application/json',
|
||||
'application/yaml',
|
||||
])
|
||||
|
||||
# HTTP header `Content-Type`
|
||||
if params.get('content_type'):
|
||||
header_params['Content-Type'] = params['content_type']
|
||||
else:
|
||||
header_params['Content-Type'] = self.client.select_header_content_type(['*/*'])
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken']
|
||||
|
||||
api_response = self.client.call_api(
|
||||
path,
|
||||
method.upper(),
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body,
|
||||
post_params=form_params,
|
||||
async_req=params.get('async_req'),
|
||||
files=local_var_files,
|
||||
auth_settings=auth_settings,
|
||||
_preload_content=False,
|
||||
_return_http_data_only=params.get('_return_http_data_only', True),
|
||||
_request_timeout=params.get('_request_timeout')
|
||||
)
|
||||
if params.get('async_req'):
|
||||
return api_response.get()
|
||||
else:
|
||||
return api_response
|
||||
|
||||
def validate(self, definition, version=None, strict=False):
|
||||
"""validate checks a kubernetes resource definition
|
||||
|
||||
Args:
|
||||
definition (dict): resource definition
|
||||
version (str): version of kubernetes to validate against
|
||||
strict (bool): whether unexpected additional properties should be considered errors
|
||||
|
||||
Returns:
|
||||
warnings (list), errors (list): warnings are missing validations, errors are validation failures
|
||||
"""
|
||||
if not HAS_KUBERNETES_VALIDATE:
|
||||
raise KubernetesValidateMissing()
|
||||
|
||||
errors = list()
|
||||
warnings = list()
|
||||
try:
|
||||
if version is None:
|
||||
try:
|
||||
version = self.version['kubernetes']['gitVersion']
|
||||
except KeyError:
|
||||
version = kubernetes_validate.latest_version()
|
||||
kubernetes_validate.validate(definition, version, strict)
|
||||
except kubernetes_validate.utils.ValidationError as e:
|
||||
errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message)) # noqa: B306
|
||||
except VersionNotSupportedError:
|
||||
errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version)
|
||||
except kubernetes_validate.utils.SchemaNotFoundError as e:
|
||||
warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" %
|
||||
(e.kind, e.api_version, e.version))
|
||||
return warnings, errors
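# Illustrative usage (not part of the upstream module): DynamicClient wraps a plain
# ApiClient and discovers resources on demand, so arbitrary kinds can be handled
# without generated model classes:
#
#     from kubernetes import config, dynamic
#     from kubernetes.client import api_client
#     config.load_kube_config()
#     dyn = dynamic.DynamicClient(api_client.ApiClient())
#     v1_pods = dyn.resources.get(api_version='v1', kind='Pod')
#     for pod in v1_pods.get(namespace='default').items:
#         print(pod.metadata.name)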
|
433 kubernetes/base/dynamic/discovery.py Normal file
@@ -0,0 +1,433 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import six
|
||||
import json
|
||||
import logging
|
||||
import hashlib
|
||||
import tempfile
|
||||
from functools import partial
|
||||
from collections import defaultdict
|
||||
from abc import abstractmethod, abstractproperty
|
||||
|
||||
from urllib3.exceptions import ProtocolError, MaxRetryError
|
||||
|
||||
from kubernetes import __version__
|
||||
from .exceptions import NotFoundError, ResourceNotFoundError, ResourceNotUniqueError, ApiException, ServiceUnavailableError
|
||||
from .resource import Resource, ResourceList
|
||||
|
||||
|
||||
DISCOVERY_PREFIX = 'apis'
|
||||
|
||||
|
||||
class Discoverer(object):
|
||||
"""
|
||||
A convenient container for storing discovered API resources. Allows
|
||||
easy searching and retrieval of specific resources.
|
||||
|
||||
Subclasses implement the abstract methods with different loading strategies.
|
||||
"""
|
||||
|
||||
def __init__(self, client, cache_file):
|
||||
self.client = client
|
||||
default_cache_id = self.client.configuration.host
|
||||
if six.PY3:
|
||||
default_cache_id = default_cache_id.encode('utf-8')
|
||||
try:
|
||||
default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id, usedforsecurity=False).hexdigest())
|
||||
except TypeError:
|
||||
# usedforsecurity is only supported in 3.9+
|
||||
default_cachefile_name = 'osrcp-{0}.json'.format(hashlib.md5(default_cache_id).hexdigest())
|
||||
self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cachefile_name)
|
||||
self.__init_cache()
|
||||
|
||||
def __init_cache(self, refresh=False):
|
||||
if refresh or not os.path.exists(self.__cache_file):
|
||||
self._cache = {'library_version': __version__}
|
||||
refresh = True
|
||||
else:
|
||||
try:
|
||||
with open(self.__cache_file, 'r') as f:
|
||||
self._cache = json.load(f, cls=partial(CacheDecoder, self.client))
|
||||
if self._cache.get('library_version') != __version__:
|
||||
# Version mismatch, need to refresh cache
|
||||
self.invalidate_cache()
|
||||
except Exception as e:
|
||||
logging.error("load cache error: %s", e)
|
||||
self.invalidate_cache()
|
||||
self._load_server_info()
|
||||
self.discover()
|
||||
if refresh:
|
||||
self._write_cache()
|
||||
|
||||
def _write_cache(self):
|
||||
try:
|
||||
with open(self.__cache_file, 'w') as f:
|
||||
json.dump(self._cache, f, cls=CacheEncoder)
|
||||
except Exception:
|
||||
# Failing to write the cache isn't a big enough error to crash on
|
||||
pass
|
||||
|
||||
def invalidate_cache(self):
|
||||
self.__init_cache(refresh=True)
|
||||
|
||||
@abstractproperty
|
||||
def api_groups(self):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def discover(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
def version(self):
|
||||
return self.__version
|
||||
|
||||
def default_groups(self, request_resources=False):
|
||||
groups = {}
|
||||
groups['api'] = { '': {
|
||||
'v1': (ResourceGroup( True, resources=self.get_resources_for_api_version('api', '', 'v1', True) )
|
||||
if request_resources else ResourceGroup(True))
|
||||
}}
|
||||
|
||||
groups[DISCOVERY_PREFIX] = {'': {
|
||||
'v1': ResourceGroup(True, resources = {"List": [ResourceList(self.client)]})
|
||||
}}
|
||||
return groups
|
||||
|
||||
def parse_api_groups(self, request_resources=False, update=False):
|
||||
""" Discovers all API groups present in the cluster """
|
||||
if not self._cache.get('resources') or update:
|
||||
self._cache['resources'] = self._cache.get('resources', {})
|
||||
groups_response = self.client.request('GET', '/{}'.format(DISCOVERY_PREFIX)).groups
|
||||
|
||||
groups = self.default_groups(request_resources=request_resources)
|
||||
|
||||
for group in groups_response:
|
||||
new_group = {}
|
||||
for version_raw in group['versions']:
|
||||
version = version_raw['version']
|
||||
resource_group = self._cache.get('resources', {}).get(DISCOVERY_PREFIX, {}).get(group['name'], {}).get(version)
|
||||
preferred = version_raw == group['preferredVersion']
|
||||
resources = resource_group.resources if resource_group else {}
|
||||
if request_resources:
|
||||
resources = self.get_resources_for_api_version(DISCOVERY_PREFIX, group['name'], version, preferred)
|
||||
new_group[version] = ResourceGroup(preferred, resources=resources)
|
||||
groups[DISCOVERY_PREFIX][group['name']] = new_group
|
||||
self._cache['resources'].update(groups)
|
||||
self._write_cache()
|
||||
|
||||
return self._cache['resources']
|
||||
|
||||
def _load_server_info(self):
|
||||
def just_json(_, serialized):
|
||||
return serialized
|
||||
|
||||
if not self._cache.get('version'):
|
||||
try:
|
||||
self._cache['version'] = {
|
||||
'kubernetes': self.client.request('get', '/version', serializer=just_json)
|
||||
}
|
||||
except (ValueError, MaxRetryError) as e:
|
||||
if isinstance(e, MaxRetryError) and not isinstance(e.reason, ProtocolError):
|
||||
raise
|
||||
if not self.client.configuration.host.startswith("https://"):
|
||||
raise ValueError("Host value %s should start with https:// when talking to HTTPS endpoint" %
|
||||
self.client.configuration.host)
|
||||
else:
|
||||
raise
|
||||
|
||||
self.__version = self._cache['version']
|
||||
|
||||
def get_resources_for_api_version(self, prefix, group, version, preferred):
|
||||
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
|
||||
|
||||
resources = defaultdict(list)
|
||||
subresources = {}
|
||||
|
||||
path = '/'.join(filter(None, [prefix, group, version]))
|
||||
try:
|
||||
resources_response = self.client.request('GET', path).resources or []
|
||||
except ServiceUnavailableError:
|
||||
resources_response = []
|
||||
|
||||
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
|
||||
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
|
||||
for subresource in subresources_raw:
|
||||
resource, name = subresource['name'].split('/', 1)
|
||||
if not subresources.get(resource):
|
||||
subresources[resource] = {}
|
||||
subresources[resource][name] = subresource
|
||||
|
||||
for resource in resources_raw:
|
||||
# Prevent duplicate keys
|
||||
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
|
||||
resource.pop(key, None)
|
||||
|
||||
resourceobj = Resource(
|
||||
prefix=prefix,
|
||||
group=group,
|
||||
api_version=version,
|
||||
client=self.client,
|
||||
preferred=preferred,
|
||||
subresources=subresources.get(resource['name']),
|
||||
**resource
|
||||
)
|
||||
resources[resource['kind']].append(resourceobj)
|
||||
|
||||
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'])
|
||||
resources[resource_list.kind].append(resource_list)
|
||||
return resources
|
||||
|
||||
def get(self, **kwargs):
|
||||
""" Same as search, but will throw an error if there are multiple or no
|
||||
results. If there are multiple results and only one is an exact match
|
||||
on api_version, that resource will be returned.
|
||||
"""
|
||||
results = self.search(**kwargs)
|
||||
# If there are multiple matches, prefer exact matches on api_version
|
||||
if len(results) > 1 and kwargs.get('api_version'):
|
||||
results = [
|
||||
result for result in results if result.group_version == kwargs['api_version']
|
||||
]
|
||||
# If there are multiple matches, prefer non-List kinds
|
||||
if len(results) > 1 and not all([isinstance(x, ResourceList) for x in results]):
|
||||
results = [result for result in results if not isinstance(result, ResourceList)]
|
||||
if len(results) == 1:
|
||||
return results[0]
|
||||
elif not results:
|
||||
raise ResourceNotFoundError('No matches found for {}'.format(kwargs))
|
||||
else:
|
||||
raise ResourceNotUniqueError('Multiple matches found for {}: {}'.format(kwargs, results))
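# Illustrative sketch (not part of the upstream module): with `dyn` being a
# DynamicClient instance, this lookup is normally reached as `dyn.resources.get(...)`,
# which narrows `search` down to exactly one resource or raises:
#
#     deployments = dyn.resources.get(api_version='apps/v1', kind='Deployment')
#     dyn.resources.get(kind='NoSuchKind')   # raises ResourceNotFoundError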
|
||||
|
||||
|
||||
class LazyDiscoverer(Discoverer):
|
||||
""" A convenient container for storing discovered API resources. Allows
|
||||
easy searching and retrieval of specific resources.
|
||||
|
||||
Resources for the cluster are loaded lazily.
|
||||
"""
|
||||
|
||||
def __init__(self, client, cache_file):
|
||||
Discoverer.__init__(self, client, cache_file)
|
||||
self.__update_cache = False
|
||||
|
||||
def discover(self):
|
||||
self.__resources = self.parse_api_groups(request_resources=False)
|
||||
|
||||
def __maybe_write_cache(self):
|
||||
if self.__update_cache:
|
||||
self._write_cache()
|
||||
self.__update_cache = False
|
||||
|
||||
@property
|
||||
def api_groups(self):
|
||||
return self.parse_api_groups(request_resources=False, update=True)['apis'].keys()
|
||||
|
||||
def search(self, **kwargs):
|
||||
# On the first call, ignore ResourceNotFoundError and fall back to an empty result list
|
||||
try:
|
||||
results = self.__search(self.__build_search(**kwargs), self.__resources, [])
|
||||
except ResourceNotFoundError:
|
||||
results = []
|
||||
if not results:
|
||||
self.invalidate_cache()
|
||||
results = self.__search(self.__build_search(**kwargs), self.__resources, [])
|
||||
self.__maybe_write_cache()
|
||||
return results
|
||||
|
||||
def __search(self, parts, resources, reqParams):
|
||||
part = parts[0]
|
||||
if part != '*':
|
||||
|
||||
resourcePart = resources.get(part)
|
||||
if not resourcePart:
|
||||
return []
|
||||
elif isinstance(resourcePart, ResourceGroup):
|
||||
if len(reqParams) != 2:
|
||||
raise ValueError("prefix and group params should be present, have %s" % reqParams)
|
||||
# Check if we've requested resources for this group
|
||||
if not resourcePart.resources:
|
||||
prefix, group, version = reqParams[0], reqParams[1], part
|
||||
try:
|
||||
resourcePart.resources = self.get_resources_for_api_version(
|
||||
prefix, group, part, resourcePart.preferred)
|
||||
except NotFoundError:
|
||||
raise ResourceNotFoundError
|
||||
|
||||
self._cache['resources'][prefix][group][version] = resourcePart
|
||||
self.__update_cache = True
|
||||
return self.__search(parts[1:], resourcePart.resources, reqParams)
|
||||
elif isinstance(resourcePart, dict):
|
||||
# In this case parts[0] will be a specific prefix, group, or version
|
||||
# as we recurse
|
||||
return self.__search(parts[1:], resourcePart, reqParams + [part] )
|
||||
else:
|
||||
if parts[1] != '*' and isinstance(parts[1], dict):
|
||||
for _resource in resourcePart:
|
||||
for term, value in parts[1].items():
|
||||
if getattr(_resource, term) == value:
|
||||
return [_resource]
|
||||
|
||||
return []
|
||||
else:
|
||||
return resourcePart
|
||||
else:
|
||||
matches = []
|
||||
for key in resources.keys():
|
||||
matches.extend(self.__search([key] + parts[1:], resources, reqParams))
|
||||
return matches
|
||||
|
||||
def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
|
||||
if not group and api_version and '/' in api_version:
|
||||
group, api_version = api_version.split('/')
|
||||
|
||||
items = [prefix, group, api_version, kind, kwargs]
|
||||
return list(map(lambda x: x or '*', items))
|
||||
|
||||
def __iter__(self):
|
||||
for prefix, groups in self.__resources.items():
|
||||
for group, versions in groups.items():
|
||||
for version, rg in versions.items():
|
||||
# Request resources for this groupVersion if we haven't yet
|
||||
if not rg.resources:
|
||||
rg.resources = self.get_resources_for_api_version(
|
||||
prefix, group, version, rg.preferred)
|
||||
self._cache['resources'][prefix][group][version] = rg
|
||||
self.__update_cache = True
|
||||
for _, resource in six.iteritems(rg.resources):
|
||||
yield resource
|
||||
self.__maybe_write_cache()
|
||||
|
||||
|
||||
class EagerDiscoverer(Discoverer):
|
||||
""" A convenient container for storing discovered API resources. Allows
|
||||
easy searching and retrieval of specific resources.
|
||||
|
||||
All resources are discovered for the cluster upon object instantiation.
|
||||
"""
|
||||
|
||||
def update(self, resources):
|
||||
self.__resources = resources
|
||||
|
||||
def __init__(self, client, cache_file):
|
||||
Discoverer.__init__(self, client, cache_file)
|
||||
|
||||
def discover(self):
|
||||
self.__resources = self.parse_api_groups(request_resources=True)
|
||||
|
||||
@property
|
||||
def api_groups(self):
|
||||
""" list available api groups """
|
||||
return self.parse_api_groups(request_resources=True, update=True)['apis'].keys()
|
||||
|
||||
|
||||
def search(self, **kwargs):
|
||||
""" Takes keyword arguments and returns matching resources. The search
|
||||
will happen in the following order:
|
||||
prefix: The api prefix for a resource, ie, /api, /oapi, /apis. Can usually be ignored
|
||||
group: The api group of a resource. Will also be extracted from api_version if it is present there
|
||||
api_version: The api version of a resource
|
||||
kind: The kind of the resource
|
||||
arbitrary arguments (see below), in random order
|
||||
|
||||
The arbitrary arguments can be any valid attribute of a Resource object
|
||||
"""
|
||||
results = self.__search(self.__build_search(**kwargs), self.__resources)
|
||||
if not results:
|
||||
self.invalidate_cache()
|
||||
results = self.__search(self.__build_search(**kwargs), self.__resources)
|
||||
return results
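
# Example (illustrative sketch): unlike get(), search() returns a list, so a
# kind-only query that matches several group/versions remains usable:
#
#     for resource in client.resources.search(kind='Deployment'):
#         print(resource.group_version, resource.name)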
|
||||
|
||||
def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
|
||||
if not group and api_version and '/' in api_version:
|
||||
group, api_version = api_version.split('/')
|
||||
|
||||
items = [prefix, group, api_version, kind, kwargs]
|
||||
return list(map(lambda x: x or '*', items))
|
||||
|
||||
def __search(self, parts, resources):
|
||||
part = parts[0]
|
||||
resourcePart = resources.get(part)
|
||||
|
||||
if part != '*' and resourcePart:
|
||||
if isinstance(resourcePart, ResourceGroup):
|
||||
return self.__search(parts[1:], resourcePart.resources)
|
||||
elif isinstance(resourcePart, dict):
|
||||
return self.__search(parts[1:], resourcePart)
|
||||
else:
|
||||
if parts[1] != '*' and isinstance(parts[1], dict):
|
||||
for _resource in resourcePart:
|
||||
for term, value in parts[1].items():
|
||||
if getattr(_resource, term) == value:
|
||||
return [_resource]
|
||||
return []
|
||||
else:
|
||||
return resourcePart
|
||||
elif part == '*':
|
||||
matches = []
|
||||
for key in resources.keys():
|
||||
matches.extend(self.__search([key] + parts[1:], resources))
|
||||
return matches
|
||||
return []
|
||||
|
||||
def __iter__(self):
|
||||
for _, groups in self.__resources.items():
|
||||
for _, versions in groups.items():
|
||||
for _, resources in versions.items():
|
||||
for _, resource in resources.items():
|
||||
yield resource
|
||||
|
||||
|
||||
class ResourceGroup(object):
|
||||
"""Helper class for Discoverer container"""
|
||||
def __init__(self, preferred, resources=None):
|
||||
self.preferred = preferred
|
||||
self.resources = resources or {}
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
'_type': 'ResourceGroup',
|
||||
'preferred': self.preferred,
|
||||
'resources': self.resources,
|
||||
}
|
||||
|
||||
|
||||
class CacheEncoder(json.JSONEncoder):
|
||||
|
||||
def default(self, o):
|
||||
return o.to_dict()
|
||||
|
||||
|
||||
class CacheDecoder(json.JSONDecoder):
|
||||
def __init__(self, client, *args, **kwargs):
|
||||
self.client = client
|
||||
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
|
||||
|
||||
def object_hook(self, obj):
|
||||
if '_type' not in obj:
|
||||
return obj
|
||||
_type = obj.pop('_type')
|
||||
if _type == 'Resource':
|
||||
return Resource(client=self.client, **obj)
|
||||
elif _type == 'ResourceList':
|
||||
return ResourceList(self.client, **obj)
|
||||
elif _type == 'ResourceGroup':
|
||||
return ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources']))
|
||||
return obj

110
kubernetes/base/dynamic/exceptions.py
Normal file
@@ -0,0 +1,110 @@
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import sys
import traceback

from kubernetes.client.rest import ApiException


def api_exception(e):
    """
    Returns the proper Exception class for the given kubernetes.client.rest.ApiException object
    https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#success-codes
    """
    _, _, exc_traceback = sys.exc_info()
    tb = '\n'.join(traceback.format_tb(exc_traceback))
    return {
        400: BadRequestError,
        401: UnauthorizedError,
        403: ForbiddenError,
        404: NotFoundError,
        405: MethodNotAllowedError,
        409: ConflictError,
        410: GoneError,
        422: UnprocessibleEntityError,
        429: TooManyRequestsError,
        500: InternalServerError,
        503: ServiceUnavailableError,
        504: ServerTimeoutError,
    }.get(e.status, DynamicApiError)(e, tb)
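
# Example (illustrative sketch): api_exception() maps an ApiException to one of the
# typed subclasses below, which the dynamic client then raises, so callers can
# branch on HTTP status by exception type. `pod_api` stands in for a Resource
# obtained via client.resources.get(api_version='v1', kind='Pod'):
#
#     try:
#         pod_api.get(name='missing-pod', namespace='default')
#     except NotFoundError:
#         ...  # the API server returned 404
#     except ForbiddenError:
#         ...  # the API server returned 403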


class DynamicApiError(ApiException):
    """ Generic API Error for the dynamic client """
    def __init__(self, e, tb=None):
        self.status = e.status
        self.reason = e.reason
        self.body = e.body
        self.headers = e.headers
        self.original_traceback = tb

    def __str__(self):
        error_message = [str(self.status), "Reason: {}".format(self.reason)]
        if self.headers:
            error_message.append("HTTP response headers: {}".format(self.headers))

        if self.body:
            error_message.append("HTTP response body: {}".format(self.body))

        if self.original_traceback:
            error_message.append("Original traceback: \n{}".format(self.original_traceback))

        return '\n'.join(error_message)

    def summary(self):
        if self.body:
            if self.headers and self.headers.get('Content-Type') == 'application/json':
                message = json.loads(self.body).get('message')
                if message:
                    return message

            return self.body
        else:
            return "{} Reason: {}".format(self.status, self.reason)

class ResourceNotFoundError(Exception):
    """ Resource was not found in available APIs """
class ResourceNotUniqueError(Exception):
    """ Parameters given matched multiple API resources """

class KubernetesValidateMissing(Exception):
    """ kubernetes-validate is not installed """

# HTTP Errors
class BadRequestError(DynamicApiError):
    """ 400: StatusBadRequest """
class UnauthorizedError(DynamicApiError):
    """ 401: StatusUnauthorized """
class ForbiddenError(DynamicApiError):
    """ 403: StatusForbidden """
class NotFoundError(DynamicApiError):
    """ 404: StatusNotFound """
class MethodNotAllowedError(DynamicApiError):
    """ 405: StatusMethodNotAllowed """
class ConflictError(DynamicApiError):
    """ 409: StatusConflict """
class GoneError(DynamicApiError):
    """ 410: StatusGone """
class UnprocessibleEntityError(DynamicApiError):
    """ 422: StatusUnprocessibleEntity """
class TooManyRequestsError(DynamicApiError):
    """ 429: StatusTooManyRequests """
class InternalServerError(DynamicApiError):
    """ 500: StatusInternalServer """
class ServiceUnavailableError(DynamicApiError):
    """ 503: StatusServiceUnavailable """
class ServerTimeoutError(DynamicApiError):
    """ 504: StatusServerTimeout """

405
kubernetes/base/dynamic/resource.py
Normal file
@@ -0,0 +1,405 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import yaml
|
||||
from functools import partial
|
||||
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class Resource(object):
|
||||
""" Represents an API resource type, containing the information required to build urls for requests """
|
||||
|
||||
def __init__(self, prefix=None, group=None, api_version=None, kind=None,
|
||||
namespaced=False, verbs=None, name=None, preferred=False, client=None,
|
||||
singularName=None, shortNames=None, categories=None, subresources=None, **kwargs):
|
||||
|
||||
if None in (api_version, kind, prefix):
|
||||
raise ValueError("At least prefix, kind, and api_version must be provided")
|
||||
|
||||
self.prefix = prefix
|
||||
self.group = group
|
||||
self.api_version = api_version
|
||||
self.kind = kind
|
||||
self.namespaced = namespaced
|
||||
self.verbs = verbs
|
||||
self.name = name
|
||||
self.preferred = preferred
|
||||
self.client = client
|
||||
self.singular_name = singularName or (name[:-1] if name else "")
|
||||
self.short_names = shortNames
|
||||
self.categories = categories
|
||||
self.subresources = {
|
||||
k: Subresource(self, **v) for k, v in (subresources or {}).items()
|
||||
}
|
||||
|
||||
self.extra_args = kwargs
|
||||
|
||||
def to_dict(self):
|
||||
d = {
|
||||
'_type': 'Resource',
|
||||
'prefix': self.prefix,
|
||||
'group': self.group,
|
||||
'api_version': self.api_version,
|
||||
'kind': self.kind,
|
||||
'namespaced': self.namespaced,
|
||||
'verbs': self.verbs,
|
||||
'name': self.name,
|
||||
'preferred': self.preferred,
|
||||
'singularName': self.singular_name,
|
||||
'shortNames': self.short_names,
|
||||
'categories': self.categories,
|
||||
'subresources': {k: sr.to_dict() for k, sr in self.subresources.items()},
|
||||
}
|
||||
d.update(self.extra_args)
|
||||
return d
|
||||
|
||||
@property
|
||||
def group_version(self):
|
||||
if self.group:
|
||||
return '{}/{}'.format(self.group, self.api_version)
|
||||
return self.api_version
|
||||
|
||||
def __repr__(self):
|
||||
return '<{}({}/{})>'.format(self.__class__.__name__, self.group_version, self.name)
|
||||
|
||||
@property
|
||||
def urls(self):
|
||||
full_prefix = '{}/{}'.format(self.prefix, self.group_version)
|
||||
resource_name = self.name.lower()
|
||||
return {
|
||||
'base': '/{}/{}'.format(full_prefix, resource_name),
|
||||
'namespaced_base': '/{}/namespaces/{{namespace}}/{}'.format(full_prefix, resource_name),
|
||||
'full': '/{}/{}/{{name}}'.format(full_prefix, resource_name),
|
||||
'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}'.format(full_prefix, resource_name)
|
||||
}
|
||||
|
||||
def path(self, name=None, namespace=None):
|
||||
url_type = []
|
||||
path_params = {}
|
||||
if self.namespaced and namespace:
|
||||
url_type.append('namespaced')
|
||||
path_params['namespace'] = namespace
|
||||
if name:
|
||||
url_type.append('full')
|
||||
path_params['name'] = name
|
||||
else:
|
||||
url_type.append('base')
|
||||
return self.urls['_'.join(url_type)].format(**path_params)
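
# Example (illustrative sketch): for the core-group 'pods' resource
# (prefix='api', api_version='v1', name='pods', namespaced=True) the url
# templates above expand to:
#
#     resource.path()                                  -> '/api/v1/pods'
#     resource.path(namespace='default')               -> '/api/v1/namespaces/default/pods'
#     resource.path(name='web-0', namespace='default') -> '/api/v1/namespaces/default/pods/web-0'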
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in self.subresources:
|
||||
return self.subresources[name]
|
||||
return partial(getattr(self.client, name), self)
|
||||
|
||||
|
||||
class ResourceList(Resource):
|
||||
""" Represents a list of API objects """
|
||||
|
||||
def __init__(self, client, group='', api_version='v1', base_kind='', kind=None, base_resource_lookup=None):
|
||||
self.client = client
|
||||
self.group = group
|
||||
self.api_version = api_version
|
||||
self.kind = kind or '{}List'.format(base_kind)
|
||||
self.base_kind = base_kind
|
||||
self.base_resource_lookup = base_resource_lookup
|
||||
self.__base_resource = None
|
||||
|
||||
def base_resource(self):
|
||||
if self.__base_resource:
|
||||
return self.__base_resource
|
||||
elif self.base_resource_lookup:
|
||||
self.__base_resource = self.client.resources.get(**self.base_resource_lookup)
|
||||
return self.__base_resource
|
||||
elif self.base_kind:
|
||||
self.__base_resource = self.client.resources.get(group=self.group, api_version=self.api_version, kind=self.base_kind)
|
||||
return self.__base_resource
|
||||
return None
|
||||
|
||||
def _items_to_resources(self, body):
|
||||
""" Takes a List body and return a dictionary with the following structure:
|
||||
{
|
||||
'api_version': str,
|
||||
'kind': str,
|
||||
'items': [{
|
||||
'resource': Resource,
|
||||
'name': str,
|
||||
'namespace': str,
|
||||
}]
|
||||
}
|
||||
"""
|
||||
if body is None:
|
||||
raise ValueError("You must provide a body when calling methods on a ResourceList")
|
||||
|
||||
api_version = body['apiVersion']
|
||||
kind = body['kind']
|
||||
items = body.get('items')
|
||||
if not items:
|
||||
raise ValueError('The `items` field in the body must be populated when calling methods on a ResourceList')
|
||||
|
||||
if self.kind != kind:
|
||||
raise ValueError('Methods on a {} must be called with a body containing the same kind. Received {} instead'.format(self.kind, kind))
|
||||
|
||||
return {
|
||||
'api_version': api_version,
|
||||
'kind': kind,
|
||||
'items': [self._item_to_resource(item) for item in items]
|
||||
}
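
# Example (illustrative sketch): a minimal body accepted by a ResourceList whose
# kind is 'PodList'. The per-item apiVersion/kind may be omitted because they
# are resolved through base_kind:
#
#     {
#         'apiVersion': 'v1',
#         'kind': 'PodList',
#         'items': [
#             {'metadata': {'name': 'web-0', 'namespace': 'default'}},
#         ],
#     }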
|
||||
|
||||
def _item_to_resource(self, item):
|
||||
metadata = item.get('metadata', {})
|
||||
resource = self.base_resource()
|
||||
if not resource:
|
||||
api_version = item.get('apiVersion', self.api_version)
|
||||
kind = item.get('kind', self.base_kind)
|
||||
resource = self.client.resources.get(api_version=api_version, kind=kind)
|
||||
return {
|
||||
'resource': resource,
|
||||
'definition': item,
|
||||
'name': metadata.get('name'),
|
||||
'namespace': metadata.get('namespace')
|
||||
}
|
||||
|
||||
def get(self, body, name=None, namespace=None, **kwargs):
|
||||
if name:
|
||||
raise ValueError('Operations on ResourceList objects do not support the `name` argument')
|
||||
resource_list = self._items_to_resources(body)
|
||||
response = copy.deepcopy(body)
|
||||
|
||||
response['items'] = [
|
||||
item['resource'].get(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
|
||||
for item in resource_list['items']
|
||||
]
|
||||
return ResourceInstance(self, response)
|
||||
|
||||
def delete(self, body, name=None, namespace=None, **kwargs):
|
||||
if name:
|
||||
raise ValueError('Operations on ResourceList objects do not support the `name` argument')
|
||||
resource_list = self._items_to_resources(body)
|
||||
response = copy.deepcopy(body)
|
||||
|
||||
response['items'] = [
|
||||
item['resource'].delete(name=item['name'], namespace=item['namespace'] or namespace, **kwargs).to_dict()
|
||||
for item in resource_list['items']
|
||||
]
|
||||
return ResourceInstance(self, response)
|
||||
|
||||
def verb_mapper(self, verb, body, **kwargs):
|
||||
resource_list = self._items_to_resources(body)
|
||||
response = copy.deepcopy(body)
|
||||
response['items'] = [
|
||||
getattr(item['resource'], verb)(body=item['definition'], **kwargs).to_dict()
|
||||
for item in resource_list['items']
|
||||
]
|
||||
return ResourceInstance(self, response)
|
||||
|
||||
def create(self, *args, **kwargs):
|
||||
return self.verb_mapper('create', *args, **kwargs)
|
||||
|
||||
def replace(self, *args, **kwargs):
|
||||
return self.verb_mapper('replace', *args, **kwargs)
|
||||
|
||||
def patch(self, *args, **kwargs):
|
||||
return self.verb_mapper('patch', *args, **kwargs)
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
'_type': 'ResourceList',
|
||||
'group': self.group,
|
||||
'api_version': self.api_version,
|
||||
'kind': self.kind,
|
||||
'base_kind': self.base_kind
|
||||
}
|
||||
|
||||
def __getattr__(self, name):
|
||||
if self.base_resource():
|
||||
return getattr(self.base_resource(), name)
|
||||
return None
|
||||
|
||||
|
||||
class Subresource(Resource):
|
||||
""" Represents a subresource of an API resource. This generally includes operations
|
||||
like scale, as well as status objects for an instantiated resource
|
||||
"""
|
||||
|
||||
def __init__(self, parent, **kwargs):
|
||||
self.parent = parent
|
||||
self.prefix = parent.prefix
|
||||
self.group = parent.group
|
||||
self.api_version = parent.api_version
|
||||
self.kind = kwargs.pop('kind')
|
||||
self.name = kwargs.pop('name')
|
||||
self.subresource = kwargs.pop('subresource', None) or self.name.split('/')[1]
|
||||
self.namespaced = kwargs.pop('namespaced', False)
|
||||
self.verbs = kwargs.pop('verbs', None)
|
||||
self.extra_args = kwargs
|
||||
|
||||
#TODO(fabianvf): Determine proper way to handle differences between resources + subresources
|
||||
def create(self, body=None, name=None, namespace=None, **kwargs):
|
||||
name = name or body.get('metadata', {}).get('name')
|
||||
body = self.parent.client.serialize_body(body)
|
||||
if self.parent.namespaced:
|
||||
namespace = self.parent.client.ensure_namespace(self.parent, namespace, body)
|
||||
path = self.path(name=name, namespace=namespace)
|
||||
return self.parent.client.request('post', path, body=body, **kwargs)
|
||||
|
||||
@property
|
||||
def urls(self):
|
||||
full_prefix = '{}/{}'.format(self.prefix, self.group_version)
|
||||
return {
|
||||
'full': '/{}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource),
|
||||
'namespaced_full': '/{}/namespaces/{{namespace}}/{}/{{name}}/{}'.format(full_prefix, self.parent.name, self.subresource)
|
||||
}
|
||||
|
||||
def __getattr__(self, name):
|
||||
return partial(getattr(self.parent.client, name), self)
|
||||
|
||||
def to_dict(self):
|
||||
d = {
|
||||
'kind': self.kind,
|
||||
'name': self.name,
|
||||
'subresource': self.subresource,
|
||||
'namespaced': self.namespaced,
|
||||
'verbs': self.verbs
|
||||
}
|
||||
d.update(self.extra_args)
|
||||
return d
|
||||
|
||||
|
||||
class ResourceInstance(object):
|
||||
""" A parsed instance of an API resource. It exists solely to
|
||||
ease interaction with API objects by allowing attributes to
|
||||
be accessed with '.' notation.
|
||||
"""
|
||||
|
||||
def __init__(self, client, instance):
|
||||
self.client = client
|
||||
# If we have a list of resources, then set the apiVersion and kind of
|
||||
# each resource in 'items'
|
||||
kind = instance['kind']
|
||||
if kind.endswith('List') and 'items' in instance:
|
||||
kind = instance['kind'][:-4]
|
||||
if not instance['items']:
|
||||
instance['items'] = []
|
||||
for item in instance['items']:
|
||||
if 'apiVersion' not in item:
|
||||
item['apiVersion'] = instance['apiVersion']
|
||||
if 'kind' not in item:
|
||||
item['kind'] = kind
|
||||
|
||||
self.attributes = self.__deserialize(instance)
|
||||
self.__initialised = True
|
||||
|
||||
def __deserialize(self, field):
|
||||
if isinstance(field, dict):
|
||||
return ResourceField(params={
|
||||
k: self.__deserialize(v) for k, v in field.items()
|
||||
})
|
||||
elif isinstance(field, (list, tuple)):
|
||||
return [self.__deserialize(item) for item in field]
|
||||
else:
|
||||
return field
|
||||
|
||||
def __serialize(self, field):
|
||||
if isinstance(field, ResourceField):
|
||||
return {
|
||||
k: self.__serialize(v) for k, v in field.__dict__.items()
|
||||
}
|
||||
elif isinstance(field, (list, tuple)):
|
||||
return [self.__serialize(item) for item in field]
|
||||
elif isinstance(field, ResourceInstance):
|
||||
return field.to_dict()
|
||||
else:
|
||||
return field
|
||||
|
||||
def to_dict(self):
|
||||
return self.__serialize(self.attributes)
|
||||
|
||||
def to_str(self):
|
||||
return repr(self)
|
||||
|
||||
def __repr__(self):
|
||||
return "ResourceInstance[{}]:\n {}".format(
|
||||
self.attributes.kind,
|
||||
' '.join(yaml.safe_dump(self.to_dict()).splitlines(True))
|
||||
)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if '_ResourceInstance__initialised' not in self.__dict__:
|
||||
return super(ResourceInstance, self).__getattr__(name)
|
||||
return getattr(self.attributes, name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if '_ResourceInstance__initialised' not in self.__dict__:
|
||||
return super(ResourceInstance, self).__setattr__(name, value)
|
||||
elif name in self.__dict__:
|
||||
return super(ResourceInstance, self).__setattr__(name, value)
|
||||
else:
|
||||
self.attributes[name] = value
|
||||
|
||||
def __getitem__(self, name):
|
||||
return self.attributes[name]
|
||||
|
||||
def __setitem__(self, name, value):
|
||||
self.attributes[name] = value
|
||||
|
||||
def __dir__(self):
|
||||
return dir(type(self)) + list(self.attributes.__dict__.keys())
|
||||
|
||||
|
||||
class ResourceField(object):
|
||||
""" A parsed instance of an API resource attribute. It exists
|
||||
solely to ease interaction with API objects by allowing
|
||||
attributes to be accessed with '.' notation
|
||||
"""
|
||||
|
||||
def __init__(self, params):
|
||||
self.__dict__.update(**params)
|
||||
|
||||
def __repr__(self):
|
||||
return pformat(self.__dict__)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.__dict__ == other.__dict__
|
||||
|
||||
def __getitem__(self, name):
|
||||
return self.__dict__.get(name)
|
||||
|
||||
# Here resource.items will return items if available or resource.__dict__.items function if not
|
||||
# resource.get will call resource.__dict__.get after attempting resource.__dict__.get('get')
|
||||
def __getattr__(self, name):
|
||||
return self.__dict__.get(name, getattr(self.__dict__, name, None))
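
# Example (illustrative sketch): missing attributes resolve to None instead of
# raising, while dict methods remain reachable when no key shadows them:
#
#     field = ResourceField(params={'name': 'web-0'})
#     field.name       # 'web-0'
#     field.replicas   # None (no such key)
#     field.items      # bound dict.items, since 'items' is not a key here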
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
self.__dict__[name] = value
|
||||
|
||||
def __dir__(self):
|
||||
return dir(type(self)) + list(self.__dict__.keys())
|
||||
|
||||
def __iter__(self):
|
||||
for k, v in self.__dict__.items():
|
||||
yield (k, v)
|
||||
|
||||
def to_dict(self):
|
||||
return self.__serialize(self)
|
||||
|
||||
def __serialize(self, field):
|
||||
if isinstance(field, ResourceField):
|
||||
return {
|
||||
k: self.__serialize(v) for k, v in field.__dict__.items()
|
||||
}
|
||||
if isinstance(field, (list, tuple)):
|
||||
return [self.__serialize(item) for item in field]
|
||||
return field

571
kubernetes/base/dynamic/test_client.py
Normal file
@@ -0,0 +1,571 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import time
|
||||
import unittest
|
||||
import uuid
|
||||
|
||||
from kubernetes.e2e_test import base
|
||||
from kubernetes.client import api_client
|
||||
|
||||
from . import DynamicClient
|
||||
from .resource import ResourceInstance, ResourceField
|
||||
from .exceptions import ResourceNotFoundError
|
||||
|
||||
|
||||
def short_uuid():
|
||||
id = str(uuid.uuid4())
|
||||
return id[-12:]
|
||||
|
||||
|
||||
class TestDynamicClient(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.config = base.get_e2e_configuration()
|
||||
|
||||
def test_cluster_custom_resources(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ClusterChangeMe')
|
||||
|
||||
crd_api = client.resources.get(
|
||||
api_version='apiextensions.k8s.io/v1beta1',
|
||||
kind='CustomResourceDefinition')
|
||||
name = 'clusterchangemes.apps.example.com'
|
||||
crd_manifest = {
|
||||
'apiVersion': 'apiextensions.k8s.io/v1beta1',
|
||||
'kind': 'CustomResourceDefinition',
|
||||
'metadata': {
|
||||
'name': name,
|
||||
},
|
||||
'spec': {
|
||||
'group': 'apps.example.com',
|
||||
'names': {
|
||||
'kind': 'ClusterChangeMe',
|
||||
'listKind': 'ClusterChangeMeList',
|
||||
'plural': 'clusterchangemes',
|
||||
'singular': 'clusterchangeme',
|
||||
},
|
||||
'scope': 'Cluster',
|
||||
'version': 'v1',
|
||||
'subresources': {
|
||||
'status': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
resp = crd_api.create(crd_manifest)
|
||||
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
resp = crd_api.get(
|
||||
name=name,
|
||||
)
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
try:
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ClusterChangeMe')
|
||||
except ResourceNotFoundError:
|
||||
# Need to wait a sec for the discovery layer to get updated
|
||||
time.sleep(2)
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ClusterChangeMe')
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(resp.items, [])
|
||||
changeme_name = 'custom-resource' + short_uuid()
|
||||
changeme_manifest = {
|
||||
'apiVersion': 'apps.example.com/v1',
|
||||
'kind': 'ClusterChangeMe',
|
||||
'metadata': {
|
||||
'name': changeme_name,
|
||||
},
|
||||
'spec': {}
|
||||
}
|
||||
|
||||
resp = changeme_api.create(body=changeme_manifest)
|
||||
self.assertEqual(resp.metadata.name, changeme_name)
|
||||
|
||||
resp = changeme_api.get(name=changeme_name)
|
||||
self.assertEqual(resp.metadata.name, changeme_name)
|
||||
|
||||
changeme_manifest['spec']['size'] = 3
|
||||
resp = changeme_api.patch(
|
||||
body=changeme_manifest,
|
||||
content_type='application/merge-patch+json'
|
||||
)
|
||||
self.assertEqual(resp.spec.size, 3)
|
||||
|
||||
resp = changeme_api.get(name=changeme_name)
|
||||
self.assertEqual(resp.spec.size, 3)
|
||||
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(len(resp.items), 1)
|
||||
|
||||
resp = changeme_api.delete(
|
||||
name=changeme_name,
|
||||
)
|
||||
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(len(resp.items), 0)
|
||||
|
||||
resp = crd_api.delete(
|
||||
name=name,
|
||||
)
|
||||
|
||||
time.sleep(2)
|
||||
client.resources.invalidate_cache()
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ClusterChangeMe')
|
||||
|
||||
def test_async_namespaced_custom_resources(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
|
||||
crd_api = client.resources.get(
|
||||
api_version='apiextensions.k8s.io/v1beta1',
|
||||
kind='CustomResourceDefinition')
|
||||
|
||||
name = 'changemes.apps.example.com'
|
||||
|
||||
crd_manifest = {
|
||||
'apiVersion': 'apiextensions.k8s.io/v1beta1',
|
||||
'kind': 'CustomResourceDefinition',
|
||||
'metadata': {
|
||||
'name': name,
|
||||
},
|
||||
'spec': {
|
||||
'group': 'apps.example.com',
|
||||
'names': {
|
||||
'kind': 'ChangeMe',
|
||||
'listKind': 'ChangeMeList',
|
||||
'plural': 'changemes',
|
||||
'singular': 'changeme',
|
||||
},
|
||||
'scope': 'Namespaced',
|
||||
'version': 'v1',
|
||||
'subresources': {
|
||||
'status': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
async_resp = crd_api.create(crd_manifest, async_req=True)
|
||||
|
||||
self.assertEqual(name, async_resp.metadata.name)
|
||||
self.assertTrue(async_resp.status)
|
||||
|
||||
async_resp = crd_api.get(
|
||||
name=name,
|
||||
async_req=True
|
||||
)
|
||||
self.assertEqual(name, async_resp.metadata.name)
|
||||
self.assertTrue(async_resp.status)
|
||||
|
||||
try:
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
except ResourceNotFoundError:
|
||||
# Need to wait a sec for the discovery layer to get updated
|
||||
time.sleep(2)
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
|
||||
async_resp = changeme_api.get(async_req=True)
|
||||
self.assertEqual(async_resp.items, [])
|
||||
|
||||
changeme_name = 'custom-resource' + short_uuid()
|
||||
changeme_manifest = {
|
||||
'apiVersion': 'apps.example.com/v1',
|
||||
'kind': 'ChangeMe',
|
||||
'metadata': {
|
||||
'name': changeme_name,
|
||||
},
|
||||
'spec': {}
|
||||
}
|
||||
|
||||
async_resp = changeme_api.create(body=changeme_manifest, namespace='default', async_req=True)
|
||||
self.assertEqual(async_resp.metadata.name, changeme_name)
|
||||
|
||||
async_resp = changeme_api.get(name=changeme_name, namespace='default', async_req=True)
|
||||
self.assertEqual(async_resp.metadata.name, changeme_name)
|
||||
|
||||
changeme_manifest['spec']['size'] = 3
|
||||
async_resp = changeme_api.patch(
|
||||
body=changeme_manifest,
|
||||
namespace='default',
|
||||
content_type='application/merge-patch+json',
|
||||
async_req=True
|
||||
)
|
||||
self.assertEqual(async_resp.spec.size, 3)
|
||||
|
||||
async_resp = changeme_api.get(name=changeme_name, namespace='default', async_req=True)
|
||||
self.assertEqual(async_resp.spec.size, 3)
|
||||
|
||||
async_resp = changeme_api.get(namespace='default', async_req=True)
|
||||
self.assertEqual(len(async_resp.items), 1)
|
||||
|
||||
async_resp = changeme_api.get(async_req=True)
|
||||
self.assertEqual(len(async_resp.items), 1)
|
||||
|
||||
async_resp = changeme_api.delete(
|
||||
name=changeme_name,
|
||||
namespace='default',
|
||||
async_req=True
|
||||
)
|
||||
|
||||
async_resp = changeme_api.get(namespace='default', async_req=True)
|
||||
self.assertEqual(len(async_resp.items), 0)
|
||||
|
||||
async_resp = changeme_api.get(async_req=True)
|
||||
self.assertEqual(len(async_resp.items), 0)
|
||||
|
||||
async_resp = crd_api.delete(
|
||||
name=name,
|
||||
async_req=True
|
||||
)
|
||||
|
||||
time.sleep(2)
|
||||
client.resources.invalidate_cache()
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
|
||||
def test_namespaced_custom_resources(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
|
||||
crd_api = client.resources.get(
|
||||
api_version='apiextensions.k8s.io/v1beta1',
|
||||
kind='CustomResourceDefinition')
|
||||
name = 'changemes.apps.example.com'
|
||||
crd_manifest = {
|
||||
'apiVersion': 'apiextensions.k8s.io/v1beta1',
|
||||
'kind': 'CustomResourceDefinition',
|
||||
'metadata': {
|
||||
'name': name,
|
||||
},
|
||||
'spec': {
|
||||
'group': 'apps.example.com',
|
||||
'names': {
|
||||
'kind': 'ChangeMe',
|
||||
'listKind': 'ChangeMeList',
|
||||
'plural': 'changemes',
|
||||
'singular': 'changeme',
|
||||
},
|
||||
'scope': 'Namespaced',
|
||||
'version': 'v1',
|
||||
'subresources': {
|
||||
'status': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
resp = crd_api.create(crd_manifest)
|
||||
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
resp = crd_api.get(
|
||||
name=name,
|
||||
)
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
try:
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
except ResourceNotFoundError:
|
||||
# Need to wait a sec for the discovery layer to get updated
|
||||
time.sleep(2)
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(resp.items, [])
|
||||
changeme_name = 'custom-resource' + short_uuid()
|
||||
changeme_manifest = {
|
||||
'apiVersion': 'apps.example.com/v1',
|
||||
'kind': 'ChangeMe',
|
||||
'metadata': {
|
||||
'name': changeme_name,
|
||||
},
|
||||
'spec': {}
|
||||
}
|
||||
|
||||
resp = changeme_api.create(body=changeme_manifest, namespace='default')
|
||||
self.assertEqual(resp.metadata.name, changeme_name)
|
||||
|
||||
resp = changeme_api.get(name=changeme_name, namespace='default')
|
||||
self.assertEqual(resp.metadata.name, changeme_name)
|
||||
|
||||
changeme_manifest['spec']['size'] = 3
|
||||
resp = changeme_api.patch(
|
||||
body=changeme_manifest,
|
||||
namespace='default',
|
||||
content_type='application/merge-patch+json'
|
||||
)
|
||||
self.assertEqual(resp.spec.size, 3)
|
||||
|
||||
resp = changeme_api.get(name=changeme_name, namespace='default')
|
||||
self.assertEqual(resp.spec.size, 3)
|
||||
|
||||
resp = changeme_api.get(namespace='default')
|
||||
self.assertEqual(len(resp.items), 1)
|
||||
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(len(resp.items), 1)
|
||||
|
||||
resp = changeme_api.delete(
|
||||
name=changeme_name,
|
||||
namespace='default'
|
||||
)
|
||||
|
||||
resp = changeme_api.get(namespace='default')
|
||||
self.assertEqual(len(resp.items), 0)
|
||||
|
||||
resp = changeme_api.get()
|
||||
self.assertEqual(len(resp.items), 0)
|
||||
|
||||
resp = crd_api.delete(
|
||||
name=name,
|
||||
)
|
||||
|
||||
time.sleep(2)
|
||||
client.resources.invalidate_cache()
|
||||
with self.assertRaises(ResourceNotFoundError):
|
||||
changeme_api = client.resources.get(
|
||||
api_version='apps.example.com/v1', kind='ChangeMe')
|
||||
|
||||
def test_service_apis(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(api_version='v1', kind='Service')
|
||||
|
||||
name = 'frontend-' + short_uuid()
|
||||
service_manifest = {'apiVersion': 'v1',
|
||||
'kind': 'Service',
|
||||
'metadata': {'labels': {'name': name},
|
||||
'name': name,
|
||||
'resourceversion': 'v1'},
|
||||
'spec': {'ports': [{'name': 'port',
|
||||
'port': 80,
|
||||
'protocol': 'TCP',
|
||||
'targetPort': 80}],
|
||||
'selector': {'name': name}}}
|
||||
|
||||
resp = api.create(
|
||||
body=service_manifest,
|
||||
namespace='default'
|
||||
)
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
resp = api.get(
|
||||
name=name,
|
||||
namespace='default'
|
||||
)
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
service_manifest['spec']['ports'] = [{'name': 'new',
|
||||
'port': 8080,
|
||||
'protocol': 'TCP',
|
||||
'targetPort': 8080}]
|
||||
resp = api.patch(
|
||||
body=service_manifest,
|
||||
name=name,
|
||||
namespace='default'
|
||||
)
|
||||
self.assertEqual(2, len(resp.spec.ports))
|
||||
self.assertTrue(resp.status)
|
||||
|
||||
resp = api.delete(
|
||||
name=name, body={},
|
||||
namespace='default'
|
||||
)
|
||||
|
||||
def test_replication_controller_apis(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(
|
||||
api_version='v1', kind='ReplicationController')
|
||||
|
||||
name = 'frontend-' + short_uuid()
|
||||
rc_manifest = {
|
||||
'apiVersion': 'v1',
|
||||
'kind': 'ReplicationController',
|
||||
'metadata': {'labels': {'name': name},
|
||||
'name': name},
|
||||
'spec': {'replicas': 2,
|
||||
'selector': {'name': name},
|
||||
'template': {'metadata': {
|
||||
'labels': {'name': name}},
|
||||
'spec': {'containers': [{
|
||||
'image': 'nginx',
|
||||
'name': 'nginx',
|
||||
'ports': [{'containerPort': 80,
|
||||
'protocol': 'TCP'}]}]}}}}
|
||||
|
||||
resp = api.create(
|
||||
body=rc_manifest, namespace='default')
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertEqual(2, resp.spec.replicas)
|
||||
|
||||
resp = api.get(
|
||||
name=name, namespace='default')
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
self.assertEqual(2, resp.spec.replicas)
|
||||
|
||||
api.delete(
|
||||
name=name,
|
||||
namespace='default',
|
||||
propagation_policy='Background')
|
||||
|
||||
def test_configmap_apis(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(api_version='v1', kind='ConfigMap')
|
||||
|
||||
name = 'test-configmap-' + short_uuid()
|
||||
test_configmap = {
|
||||
"kind": "ConfigMap",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": name,
|
||||
"labels": {
|
||||
"e2e-test": "true",
|
||||
},
|
||||
},
|
||||
"data": {
|
||||
"config.json": "{\"command\":\"/usr/bin/mysqld_safe\"}",
|
||||
"frontend.cnf": "[mysqld]\nbind-address = 10.0.0.3\n"
|
||||
}
|
||||
}
|
||||
|
||||
resp = api.create(
|
||||
body=test_configmap, namespace='default'
|
||||
)
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
|
||||
resp = api.get(
|
||||
name=name, namespace='default', label_selector="e2e-test=true")
|
||||
self.assertEqual(name, resp.metadata.name)
|
||||
|
||||
count = 0
|
||||
for _ in client.watch(api, timeout=10, namespace="default", name=name):
|
||||
count += 1
|
||||
self.assertTrue(count > 0, msg="no events received for watch")
|
||||
|
||||
test_configmap['data']['config.json'] = "{}"
|
||||
resp = api.patch(
|
||||
name=name, namespace='default', body=test_configmap)
|
||||
|
||||
resp = api.delete(
|
||||
name=name, body={}, namespace='default')
|
||||
|
||||
resp = api.get(
|
||||
namespace='default',
|
||||
pretty=True,
|
||||
label_selector="e2e-test=true")
|
||||
self.assertEqual([], resp.items)
|
||||
|
||||
def test_node_apis(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(api_version='v1', kind='Node')
|
||||
|
||||
for item in api.get().items:
|
||||
node = api.get(name=item.metadata.name)
|
||||
self.assertTrue(len(dict(node.metadata.labels)) > 0)
|
||||
|
||||
# test_node_apis_partial_object_metadata lists all nodes in the cluster,
|
||||
# but only retrieves object metadata
|
||||
def test_node_apis_partial_object_metadata(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(api_version='v1', kind='Node')
|
||||
|
||||
params = {
|
||||
'header_params': {
|
||||
'Accept': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}}
|
||||
resp = api.get(**params)
|
||||
self.assertEqual('PartialObjectMetadataList', resp.kind)
|
||||
self.assertEqual('meta.k8s.io/v1', resp.apiVersion)
|
||||
|
||||
params = {
|
||||
'header_params': {
|
||||
'aCcePt': 'application/json;as=PartialObjectMetadataList;v=v1;g=meta.k8s.io'}}
|
||||
resp = api.get(**params)
|
||||
self.assertEqual('PartialObjectMetadataList', resp.kind)
|
||||
self.assertEqual('meta.k8s.io/v1', resp.apiVersion)
|
||||
|
||||
def test_server_side_apply_api(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
api = client.resources.get(
|
||||
api_version='v1', kind='Pod')
|
||||
|
||||
name = 'pod-' + short_uuid()
|
||||
pod_manifest = {
|
||||
'apiVersion': 'v1',
|
||||
'kind': 'Pod',
|
||||
'metadata': {'labels': {'name': name},
|
||||
'name': name},
|
||||
'spec': {'containers': [{
|
||||
'image': 'nginx',
|
||||
'name': 'nginx',
|
||||
'ports': [{'containerPort': 80,
|
||||
'protocol': 'TCP'}]}]}}
|
||||
|
||||
resp = api.server_side_apply(
|
||||
namespace='default', body=pod_manifest,
|
||||
field_manager='kubernetes-unittests', dry_run="All")
|
||||
self.assertEqual('kubernetes-unittests', resp.metadata.managedFields[0].manager)
|
||||
|
||||
|
||||
class TestDynamicClientSerialization(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
config = base.get_e2e_configuration()
|
||||
cls.client = DynamicClient(api_client.ApiClient(configuration=config))
|
||||
cls.pod_manifest = {
|
||||
'apiVersion': 'v1',
|
||||
'kind': 'Pod',
|
||||
'metadata': {'name': 'foo-pod'},
|
||||
'spec': {'containers': [{'name': "main", 'image': "busybox"}]},
|
||||
}
|
||||
|
||||
def test_dict_type(self):
|
||||
self.assertEqual(self.client.serialize_body(self.pod_manifest), self.pod_manifest)
|
||||
|
||||
def test_resource_instance_type(self):
|
||||
inst = ResourceInstance(self.client, self.pod_manifest)
|
||||
self.assertEqual(self.client.serialize_body(inst), self.pod_manifest)
|
||||
|
||||
def test_resource_field(self):
|
||||
"""`ResourceField` is a special type which overwrites `__getattr__` method to return `None`
|
||||
when a non-existent attribute is accessed, which means it passes any `hasattr(...)` test.
|
||||
"""
|
||||
params = {
|
||||
"foo": "bar",
|
||||
"self": True
|
||||
}
|
||||
res = ResourceField(params=params)
|
||||
self.assertEqual(res["foo"], params["foo"])
|
||||
self.assertEqual(res["self"], params["self"])
|
||||
self.assertEqual(self.client.serialize_body(res), params)

61
kubernetes/base/dynamic/test_discovery.py
Normal file
@@ -0,0 +1,61 @@
# Copyright 2019 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from kubernetes.e2e_test import base
|
||||
from kubernetes.client import api_client
|
||||
|
||||
from . import DynamicClient
|
||||
|
||||
|
||||
class TestDiscoverer(unittest.TestCase):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.config = base.get_e2e_configuration()
|
||||
|
||||
def test_init_cache_from_file(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
client.resources.get(api_version='v1', kind='Node')
|
||||
mtime1 = os.path.getmtime(client.resources._Discoverer__cache_file)
|
||||
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
client.resources.get(api_version='v1', kind='Node')
|
||||
mtime2 = os.path.getmtime(client.resources._Discoverer__cache_file)
|
||||
|
||||
# test no Discoverer._write_cache called
|
||||
self.assertTrue(mtime1 == mtime2)
|
||||
|
||||
def test_cache_decoder_resource_and_subresource(self):
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
# first invalidate cache
|
||||
client.resources.invalidate_cache()
|
||||
|
||||
# do Discoverer.__init__
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
# the resources of client will use _cache['resources'] in memory
|
||||
deploy1 = client.resources.get(kind='Deployment')
|
||||
|
||||
# do Discoverer.__init__
|
||||
client = DynamicClient(api_client.ApiClient(configuration=self.config))
|
||||
# the resources of client will use _cache['resources'] decode from cache file
|
||||
deploy2 = client.resources.get(kind='Deployment')
|
||||
|
||||
# test Resource is the same
|
||||
self.assertTrue(deploy1 == deploy2)
|
||||
|
||||
# test Subresource is the same
|
||||
self.assertTrue(deploy1.status == deploy2.status)

205
kubernetes/base/hack/boilerplate/boilerplate.py
Executable file
@@ -0,0 +1,205 @@
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import difflib
|
||||
import glob
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
# list all the files that contain a shebang line and should be ignored by this
|
||||
# script
|
||||
SKIP_FILES = ['hack/boilerplate/boilerplate.py']
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"filenames",
|
||||
help="list of files to check, all files if unspecified",
|
||||
nargs='*')
|
||||
|
||||
rootdir = os.path.dirname(__file__) + "/../../"
|
||||
rootdir = os.path.abspath(rootdir)
|
||||
parser.add_argument(
|
||||
"--rootdir", default=rootdir, help="root directory to examine")
|
||||
|
||||
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
|
||||
parser.add_argument(
|
||||
"--boilerplate-dir", default=default_boilerplate_dir)
|
||||
|
||||
parser.add_argument(
|
||||
"-v", "--verbose",
|
||||
help="give verbose output regarding why a file does not pass",
|
||||
action="store_true")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
|
||||
|
||||
|
||||
def get_refs():
|
||||
refs = {}
|
||||
|
||||
for path in glob.glob(os.path.join(
|
||||
args.boilerplate_dir, "boilerplate.*.txt")):
|
||||
extension = os.path.basename(path).split(".")[1]
|
||||
|
||||
ref_file = open(path, 'r')
|
||||
ref = ref_file.read().splitlines()
|
||||
ref_file.close()
|
||||
refs[extension] = ref
|
||||
|
||||
return refs
|
||||
|
||||
|
||||
def file_passes(filename, refs, regexs):
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except Exception as exc:
|
||||
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
|
||||
return False
|
||||
|
||||
data = f.read()
|
||||
f.close()
|
||||
|
||||
basename = os.path.basename(filename)
|
||||
extension = file_extension(filename)
|
||||
|
||||
if extension != "":
|
||||
ref = refs[extension]
|
||||
else:
|
||||
ref = refs[basename]
|
||||
|
||||
# remove extra content from the top of files
|
||||
if extension == "sh":
|
||||
p = regexs["shebang"]
|
||||
(data, found) = p.subn("", data, 1)
|
||||
|
||||
data = data.splitlines()
|
||||
|
||||
# if our test file is smaller than the reference it surely fails!
|
||||
if len(ref) > len(data):
|
||||
print('File %s smaller than reference (%d < %d)' %
|
||||
(filename, len(data), len(ref)),
|
||||
file=verbose_out)
|
||||
return False
|
||||
|
||||
# trim our file to the same number of lines as the reference file
|
||||
data = data[:len(ref)]
|
||||
|
||||
p = regexs["year"]
|
||||
for d in data:
|
||||
if p.search(d):
|
||||
print('File %s has the YEAR field, but missing the year of date' %
|
||||
filename, file=verbose_out)
|
||||
return False
|
||||
|
||||
# Replace all occurrences of regex "2014|2015|2016|2017|2018" with "YEAR"
|
||||
p = regexs["date"]
|
||||
for i, d in enumerate(data):
|
||||
(data[i], found) = p.subn('YEAR', d)
|
||||
if found != 0:
|
||||
break
|
||||
|
||||
# if we don't match the reference at this point, fail
|
||||
if ref != data:
|
||||
print("Header in %s does not match reference, diff:" %
|
||||
filename, file=verbose_out)
|
||||
if args.verbose:
|
||||
print(file=verbose_out)
|
||||
for line in difflib.unified_diff(
|
||||
ref, data, 'reference', filename, lineterm=''):
|
||||
print(line, file=verbose_out)
|
||||
print(file=verbose_out)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def file_extension(filename):
|
||||
return os.path.splitext(filename)[1].split(".")[-1].lower()
|
||||
|
||||
|
||||
def normalize_files(files):
|
||||
newfiles = []
|
||||
for pathname in files:
|
||||
newfiles.append(pathname)
|
||||
for i, pathname in enumerate(newfiles):
|
||||
if not os.path.isabs(pathname):
|
||||
newfiles[i] = os.path.join(args.rootdir, pathname)
|
||||
|
||||
return newfiles
|
||||
|
||||
|
||||
def get_files(extensions):
|
||||
|
||||
files = []
|
||||
if len(args.filenames) > 0:
|
||||
files = args.filenames
|
||||
else:
|
||||
for root, dirs, walkfiles in os.walk(args.rootdir):
|
||||
for name in walkfiles:
|
||||
pathname = os.path.join(root, name)
|
||||
files.append(pathname)
|
||||
|
||||
files = normalize_files(files)
|
||||
outfiles = []
|
||||
for pathname in files:
|
||||
basename = os.path.basename(pathname)
|
||||
extension = file_extension(pathname)
|
||||
if extension in extensions or basename in extensions:
|
||||
outfiles.append(pathname)
|
||||
|
||||
outfiles = list(set(outfiles) - set(normalize_files(SKIP_FILES)))
|
||||
return outfiles
|
||||
|
||||
|
||||
def get_dates():
|
||||
years = datetime.datetime.now().year
|
||||
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
|
||||
|
||||
|
||||
def get_regexs():
|
||||
regexs = {}
|
||||
# Search for "YEAR" which exists in the boilerplate,
|
||||
# but shouldn't in the real thing
|
||||
regexs["year"] = re.compile('YEAR')
|
||||
# get_dates returns 2014, 2015, 2016, 2017, ... up to and including the current year
|
||||
# as a regex like: "(2014|2015|2016|2017|2018)";
|
||||
# company holder names can be anything
|
||||
regexs["date"] = re.compile(get_dates())
|
||||
# strip #!.* from shell scripts
|
||||
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
|
||||
return regexs
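
# Example (illustrative): when run in 2024, get_dates() yields
# '(2014|2015|2016|2017|2018|2019|2020|2021|2022|2023|2024)', so regexs["date"]
# replaces any of those years with 'YEAR' before comparing against the reference.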
|
||||
|
||||
|
||||
def main():
|
||||
regexs = get_regexs()
|
||||
refs = get_refs()
|
||||
filenames = get_files(refs.keys())
|
||||
|
||||
for filename in filenames:
|
||||
if not file_passes(filename, refs, regexs):
|
||||
print(filename, file=sys.stdout)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())

13
kubernetes/base/hack/boilerplate/boilerplate.py.txt
Normal file
@@ -0,0 +1,13 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

13
kubernetes/base/hack/boilerplate/boilerplate.sh.txt
Normal file
@@ -0,0 +1,13 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

35
kubernetes/base/hack/verify-boilerplate.sh
Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

boilerDir="${KUBE_ROOT}/hack/boilerplate"
boiler="${boilerDir}/boilerplate.py"

files_need_boilerplate=($(${boiler} "$@"))

# Run boilerplate check
if [[ ${#files_need_boilerplate[@]} -gt 0 ]]; then
  for file in "${files_need_boilerplate[@]}"; do
    echo "Boilerplate header is wrong for: ${file}" >&2
  done

  exit 1
fi
kubernetes/base/leaderelection/README.md (new file, 18 lines)
@@ -0,0 +1,18 @@
## Leader Election Example
This example demonstrates how to use the leader election library.

## Running
Run the following command in multiple separate terminals, preferably an odd number of them.
Each running process uses a unique identifier that is displayed when it starts to run.

- When a program runs, if a lock object already exists with the specified name,
  all candidates start as followers.
- If a lock object does not exist with the specified name, then whichever candidate
  creates a lock object first becomes the leader and the rest become followers.
- The terminal output reports the status of the candidates and any leadership transitions.

### Command to run
```python example.py```

Now kill the existing leader. You will see from the terminal outputs that one of the
remaining running processes is elected as the new leader.
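To try the demo without juggling terminals by hand, one option is to spawn several candidate processes from a single script. This is only an illustrative sketch, not part of the commit, and assumes example.py is in the current directory:

```python
import subprocess
import sys

# Spawn an odd number of candidates, as the README suggests; each process
# picks its own uuid4 identity inside example.py.
candidates = [subprocess.Popen([sys.executable, "example.py"]) for _ in range(3)]

try:
    for proc in candidates:
        proc.wait()
except KeyboardInterrupt:
    # Killing one leader (or Ctrl-C here) lets a remaining follower take over.
    for proc in candidates:
        proc.terminate()
```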
kubernetes/base/leaderelection/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kubernetes/base/leaderelection/electionconfig.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import logging
logging.basicConfig(level=logging.INFO)


class Config:
    # Validate config, exit if an error is detected
    def __init__(self, lock, lease_duration, renew_deadline, retry_period, onstarted_leading, onstopped_leading):
        self.jitter_factor = 1.2

        if lock is None:
            sys.exit("lock cannot be None")
        self.lock = lock

        if lease_duration <= renew_deadline:
            sys.exit("lease_duration must be greater than renew_deadline")

        if renew_deadline <= self.jitter_factor * retry_period:
            sys.exit("renewDeadline must be greater than retry_period*jitter_factor")

        if lease_duration < 1:
            sys.exit("lease_duration must be greater than one")

        if renew_deadline < 1:
            sys.exit("renew_deadline must be greater than one")

        if retry_period < 1:
            sys.exit("retry_period must be greater than one")

        self.lease_duration = lease_duration
        self.renew_deadline = renew_deadline
        self.retry_period = retry_period

        if onstarted_leading is None:
            sys.exit("callback onstarted_leading cannot be None")
        self.onstarted_leading = onstarted_leading

        if onstopped_leading is None:
            self.onstopped_leading = self.on_stoppedleading_callback
        else:
            self.onstopped_leading = onstopped_leading

    # Default callback used when the current candidate, having been the leader, stops leading
    def on_stoppedleading_callback(self):
        logging.info("{} stopped leading".format(self.lock.identity))
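The constructor above enforces lease_duration > renew_deadline > jitter_factor * retry_period (with jitter_factor fixed at 1.2) and requires every value to be at least 1. A small sketch of parameter sets that pass or fail those checks; the concrete numbers are arbitrary examples, not recommendations:

```python
jitter_factor = 1.2

def timings_ok(lease_duration, renew_deadline, retry_period):
    # Mirrors the checks in electionconfig.Config.__init__ without calling sys.exit.
    return (lease_duration > renew_deadline
            and renew_deadline > jitter_factor * retry_period
            and min(lease_duration, renew_deadline, retry_period) >= 1)

print(timings_ok(17, 15, 5))   # True  - the values used in example.py
print(timings_ok(10, 10, 5))   # False - lease_duration must exceed renew_deadline
print(timings_ok(10, 5, 5))    # False - renew_deadline <= 1.2 * retry_period
```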
kubernetes/base/leaderelection/example.py (new file, 54 lines)
@@ -0,0 +1,54 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import uuid
from kubernetes import client, config
from kubernetes.leaderelection import leaderelection
from kubernetes.leaderelection.resourcelock.configmaplock import ConfigMapLock
from kubernetes.leaderelection import electionconfig


# Authenticate using config file
config.load_kube_config(config_file=r"")

# Parameters required from the user

# A unique identifier for this candidate
candidate_id = uuid.uuid4()

# Name of the lock object to be created
lock_name = "examplepython"

# Kubernetes namespace
lock_namespace = "default"


# The function that a user wants to run once a candidate is elected as a leader
def example_func():
    print("I am leader")


# A user can choose not to provide any callbacks for what to do when a candidate fails to lead - onStoppedLeading()
# In that case, a default callback function will be used

# Create config
config = electionconfig.Config(ConfigMapLock(lock_name, lock_namespace, candidate_id), lease_duration=17,
                               renew_deadline=15, retry_period=5, onstarted_leading=example_func,
                               onstopped_leading=None)

# Enter leader election
leaderelection.LeaderElection(config).run()

# User can choose to do another round of election or simply exit
print("Exited leader election")
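As the final comment notes, run() returns once the candidate stops leading, so re-entering the election is simply a matter of calling it again. A hypothetical sketch, reusing the config object built above:

```python
# Keep contesting the lock: after losing the lease, rejoin as a follower.
while True:
    leaderelection.LeaderElection(config).run()
    print("Lost the lease, rejoining the election")
```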
kubernetes/base/leaderelection/leaderelection.py (new file, 191 lines)
@@ -0,0 +1,191 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import sys
import time
import json
import threading
from .leaderelectionrecord import LeaderElectionRecord
import logging
# This condition can be removed once support for Python 2 is dropped
if sys.version_info > (3, 0):
    from http import HTTPStatus
else:
    import httplib
logging.basicConfig(level=logging.INFO)

"""
This package implements leader election using an annotation in a Kubernetes object.
The onstarted_leading function is run in a thread; once it returns (if it returns
at all), it may not be safe to run it again in the same process.

At first all candidates are considered followers. The first one to create or update
a lock becomes the leader and remains the leader for as long as it keeps renewing
its lease.
"""


class LeaderElection:
    def __init__(self, election_config):
        if election_config is None:
            sys.exit("argument config not passed")

        # Latest record observed in the created lock object
        self.observed_record = None

        # The configuration set for this candidate
        self.election_config = election_config

        # Latest update time of the lock
        self.observed_time_milliseconds = 0

    # Point of entry to Leader election
    def run(self):
        # Try to create / acquire a lock
        if self.acquire():
            logging.info("{} successfully acquired lease".format(self.election_config.lock.identity))

            # Start leading and call OnStartedLeading()
            threading.daemon = True
            threading.Thread(target=self.election_config.onstarted_leading).start()

            self.renew_loop()

            # Failed to update lease, run OnStoppedLeading callback
            self.election_config.onstopped_leading()

    def acquire(self):
        # Follower
        logging.info("{} is a follower".format(self.election_config.lock.identity))
        retry_period = self.election_config.retry_period

        while True:
            succeeded = self.try_acquire_or_renew()

            if succeeded:
                return True

            time.sleep(retry_period)

    def renew_loop(self):
        # Leader
        logging.info("Leader has entered renew loop and will try to update lease continuously")

        retry_period = self.election_config.retry_period
        renew_deadline = self.election_config.renew_deadline * 1000

        while True:
            timeout = int(time.time() * 1000) + renew_deadline
            succeeded = False

            while int(time.time() * 1000) < timeout:
                succeeded = self.try_acquire_or_renew()

                if succeeded:
                    break
                time.sleep(retry_period)

            if succeeded:
                time.sleep(retry_period)
                continue

            # failed to renew, return
            return

    def try_acquire_or_renew(self):
        now_timestamp = time.time()
        now = datetime.datetime.fromtimestamp(now_timestamp)

        # Check if lock is created
        lock_status, old_election_record = self.election_config.lock.get(self.election_config.lock.name,
                                                                         self.election_config.lock.namespace)

        # create a default Election record for this candidate
        leader_election_record = LeaderElectionRecord(self.election_config.lock.identity,
                                                      str(self.election_config.lease_duration), str(now), str(now))

        # A lock is not created with that name, try to create one
        if not lock_status:
            # To be removed once support for Python 2 is dropped
            if sys.version_info > (3, 0):
                if json.loads(old_election_record.body)['code'] != HTTPStatus.NOT_FOUND:
                    logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
                                                                                  old_election_record.reason))
                    return False
            else:
                if json.loads(old_election_record.body)['code'] != httplib.NOT_FOUND:
                    logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
                                                                                  old_election_record.reason))
                    return False

            logging.info("{} is trying to create a lock".format(leader_election_record.holder_identity))
            create_status = self.election_config.lock.create(name=self.election_config.lock.name,
                                                             namespace=self.election_config.lock.namespace,
                                                             election_record=leader_election_record)

            if create_status is False:
                logging.info("{} failed to create lock".format(leader_election_record.holder_identity))
                return False

            self.observed_record = leader_election_record
            self.observed_time_milliseconds = int(time.time() * 1000)
            return True

        # A lock exists with that name
        # Validate old_election_record
        if old_election_record is None:
            # try to update lock with proper annotation and election record
            return self.update_lock(leader_election_record)

        if (old_election_record.holder_identity is None or old_election_record.lease_duration is None
                or old_election_record.acquire_time is None or old_election_record.renew_time is None):
            # try to update lock with proper annotation and election record
            return self.update_lock(leader_election_record)

        # Report transitions
        if self.observed_record and self.observed_record.holder_identity != old_election_record.holder_identity:
            logging.info("Leader has switched to {}".format(old_election_record.holder_identity))

        if self.observed_record is None or old_election_record.__dict__ != self.observed_record.__dict__:
            self.observed_record = old_election_record
            self.observed_time_milliseconds = int(time.time() * 1000)

        # If this candidate is not the leader and the lease has not yet expired
        if (self.election_config.lock.identity != self.observed_record.holder_identity
                and self.observed_time_milliseconds + self.election_config.lease_duration * 1000 > int(now_timestamp * 1000)):
            logging.info("lease held by {} and has not yet expired".format(old_election_record.holder_identity))
            return False

        # If this candidate is the leader
        if self.election_config.lock.identity == self.observed_record.holder_identity:
            # Leader updates renewTime, but keeps acquire_time unchanged
            leader_election_record.acquire_time = self.observed_record.acquire_time

        return self.update_lock(leader_election_record)

    def update_lock(self, leader_election_record):
        # Update object with latest election record
        update_status = self.election_config.lock.update(self.election_config.lock.name,
                                                         self.election_config.lock.namespace,
                                                         leader_election_record)

        if update_status is False:
            logging.info("{} failed to acquire lease".format(leader_election_record.holder_identity))
            return False

        self.observed_record = leader_election_record
        self.observed_time_milliseconds = int(time.time() * 1000)
        logging.info("leader {} has successfully acquired lease".format(leader_election_record.holder_identity))
        return True
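LeaderElection only interacts with the lock through a small duck-typed surface: name, namespace and identity attributes plus get, create and update methods with the call shapes used above. A minimal in-memory stand-in, purely an illustrative sketch (similar in spirit to the MockResourceLock in the test file below) and not part of the library:

```python
import json


class InMemoryLock:
    """Single-process lock, useful only for experimenting with the election flow."""

    def __init__(self, name, namespace, identity):
        self.name = name
        self.namespace = namespace
        self.identity = str(identity)
        self._record = None

    def get(self, name, namespace):
        if self._record is not None:
            return True, self._record

        # When the lock does not exist, LeaderElection expects a response-like
        # object carrying a JSON body with a 'code' field and a 'reason'.
        class _NotFound:
            body = json.dumps({'code': 404})
            reason = "Not Found"
        return False, _NotFound()

    def create(self, name, namespace, election_record):
        self._record = election_record
        return True

    def update(self, name, namespace, updated_record):
        self._record = updated_record
        return True
```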
kubernetes/base/leaderelection/leaderelection_test.py (new file, 270 lines)
@@ -0,0 +1,270 @@
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from . import leaderelection
|
||||
from .leaderelectionrecord import LeaderElectionRecord
|
||||
from kubernetes.client.rest import ApiException
|
||||
from . import electionconfig
|
||||
import unittest
|
||||
import threading
|
||||
import json
|
||||
import time
|
||||
import pytest
|
||||
|
||||
thread_lock = threading.RLock()
|
||||
|
||||
class LeaderElectionTest(unittest.TestCase):
|
||||
def test_simple_leader_election(self):
|
||||
election_history = []
|
||||
leadership_history = []
|
||||
|
||||
def on_create():
|
||||
election_history.append("create record")
|
||||
leadership_history.append("get leadership")
|
||||
|
||||
def on_update():
|
||||
election_history.append("update record")
|
||||
|
||||
def on_change():
|
||||
election_history.append("change record")
|
||||
|
||||
mock_lock = MockResourceLock("mock", "mock_namespace", "mock", thread_lock, on_create, on_update, on_change, None)
|
||||
|
||||
def on_started_leading():
|
||||
leadership_history.append("start leading")
|
||||
|
||||
def on_stopped_leading():
|
||||
leadership_history.append("stop leading")
|
||||
|
||||
# Create config 4.5 4 3
|
||||
config = electionconfig.Config(lock=mock_lock, lease_duration=2.5,
|
||||
renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading,
|
||||
onstopped_leading=on_stopped_leading)
|
||||
|
||||
# Enter leader election
|
||||
leaderelection.LeaderElection(config).run()
|
||||
|
||||
self.assert_history(election_history, ["create record", "update record", "update record", "update record"])
|
||||
self.assert_history(leadership_history, ["get leadership", "start leading", "stop leading"])
|
||||
|
||||
def test_leader_election(self):
|
||||
election_history = []
|
||||
leadership_history = []
|
||||
|
||||
def on_create_A():
|
||||
election_history.append("A creates record")
|
||||
leadership_history.append("A gets leadership")
|
||||
|
||||
def on_update_A():
|
||||
election_history.append("A updates record")
|
||||
|
||||
def on_change_A():
|
||||
election_history.append("A gets leadership")
|
||||
|
||||
mock_lock_A = MockResourceLock("mock", "mock_namespace", "MockA", thread_lock, on_create_A, on_update_A, on_change_A, None)
|
||||
mock_lock_A.renew_count_max = 3
|
||||
|
||||
def on_started_leading_A():
|
||||
leadership_history.append("A starts leading")
|
||||
|
||||
def on_stopped_leading_A():
|
||||
leadership_history.append("A stops leading")
|
||||
|
||||
config_A = electionconfig.Config(lock=mock_lock_A, lease_duration=2.5,
|
||||
renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading_A,
|
||||
onstopped_leading=on_stopped_leading_A)
|
||||
|
||||
def on_create_B():
|
||||
election_history.append("B creates record")
|
||||
leadership_history.append("B gets leadership")
|
||||
|
||||
def on_update_B():
|
||||
election_history.append("B updates record")
|
||||
|
||||
def on_change_B():
|
||||
leadership_history.append("B gets leadership")
|
||||
|
||||
mock_lock_B = MockResourceLock("mock", "mock_namespace", "MockB", thread_lock, on_create_B, on_update_B, on_change_B, None)
|
||||
mock_lock_B.renew_count_max = 4
|
||||
|
||||
def on_started_leading_B():
|
||||
leadership_history.append("B starts leading")
|
||||
|
||||
def on_stopped_leading_B():
|
||||
leadership_history.append("B stops leading")
|
||||
|
||||
config_B = electionconfig.Config(lock=mock_lock_B, lease_duration=2.5,
|
||||
renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading_B,
|
||||
onstopped_leading=on_stopped_leading_B)
|
||||
|
||||
mock_lock_B.leader_record = mock_lock_A.leader_record
|
||||
|
||||
threading.daemon = True
|
||||
# Enter leader election for A
|
||||
threading.Thread(target=leaderelection.LeaderElection(config_A).run()).start()
|
||||
|
||||
# Enter leader election for B
|
||||
threading.Thread(target=leaderelection.LeaderElection(config_B).run()).start()
|
||||
|
||||
time.sleep(5)
|
||||
|
||||
self.assert_history(election_history,
|
||||
["A creates record",
|
||||
"A updates record",
|
||||
"A updates record",
|
||||
"B updates record",
|
||||
"B updates record",
|
||||
"B updates record",
|
||||
"B updates record"])
|
||||
self.assert_history(leadership_history,
|
||||
["A gets leadership",
|
||||
"A starts leading",
|
||||
"A stops leading",
|
||||
"B gets leadership",
|
||||
"B starts leading",
|
||||
"B stops leading"])
|
||||
|
||||
|
||||
"""Expected behavior: to check if the leader stops leading if it fails to update the lock within the renew_deadline
|
||||
and stops leading after finally timing out. The difference between each try comes out to be approximately the sleep
|
||||
time.
|
||||
Example:
|
||||
create record: 0s
|
||||
on try update: 1.5s
|
||||
on update: zzz s
|
||||
on try update: 3s
|
||||
on update: zzz s
|
||||
on try update: 4.5s
|
||||
on try update: 6s
|
||||
Timeout - Leader Exits"""
|
||||
def test_Leader_election_with_renew_deadline(self):
|
||||
election_history = []
|
||||
leadership_history = []
|
||||
|
||||
def on_create():
|
||||
election_history.append("create record")
|
||||
leadership_history.append("get leadership")
|
||||
|
||||
def on_update():
|
||||
election_history.append("update record")
|
||||
|
||||
def on_change():
|
||||
election_history.append("change record")
|
||||
|
||||
def on_try_update():
|
||||
election_history.append("try update record")
|
||||
|
||||
mock_lock = MockResourceLock("mock", "mock_namespace", "mock", thread_lock, on_create, on_update, on_change, on_try_update)
|
||||
mock_lock.renew_count_max = 3
|
||||
|
||||
def on_started_leading():
|
||||
leadership_history.append("start leading")
|
||||
|
||||
def on_stopped_leading():
|
||||
leadership_history.append("stop leading")
|
||||
|
||||
# Create config
|
||||
config = electionconfig.Config(lock=mock_lock, lease_duration=2.5,
|
||||
renew_deadline=2, retry_period=1.5, onstarted_leading=on_started_leading,
|
||||
onstopped_leading=on_stopped_leading)
|
||||
|
||||
# Enter leader election
|
||||
leaderelection.LeaderElection(config).run()
|
||||
|
||||
self.assert_history(election_history,
|
||||
["create record",
|
||||
"try update record",
|
||||
"update record",
|
||||
"try update record",
|
||||
"update record",
|
||||
"try update record",
|
||||
"try update record"])
|
||||
|
||||
self.assert_history(leadership_history, ["get leadership", "start leading", "stop leading"])
|
||||
|
||||
def assert_history(self, history, expected):
|
||||
self.assertIsNotNone(expected)
|
||||
self.assertIsNotNone(history)
|
||||
self.assertEqual(len(expected), len(history))
|
||||
|
||||
for idx in range(len(history)):
|
||||
self.assertEqual(history[idx], expected[idx],
|
||||
msg="Not equal at index {}, expected {}, got {}".format(idx, expected[idx],
|
||||
history[idx]))
|
||||
|
||||
|
||||
class MockResourceLock:
|
||||
def __init__(self, name, namespace, identity, shared_lock, on_create=None, on_update=None, on_change=None, on_try_update=None):
|
||||
# self.leader_record is shared between two MockResourceLock objects
|
||||
self.leader_record = []
|
||||
self.renew_count = 0
|
||||
self.renew_count_max = 4
|
||||
self.name = name
|
||||
self.namespace = namespace
|
||||
self.identity = str(identity)
|
||||
self.lock = shared_lock
|
||||
|
||||
self.on_create = on_create
|
||||
self.on_update = on_update
|
||||
self.on_change = on_change
|
||||
self.on_try_update = on_try_update
|
||||
|
||||
def get(self, name, namespace):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.leader_record:
|
||||
return True, self.leader_record[0]
|
||||
|
||||
ApiException.body = json.dumps({'code': 404})
|
||||
return False, ApiException
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def create(self, name, namespace, election_record):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if len(self.leader_record) == 1:
|
||||
return False
|
||||
self.leader_record.append(election_record)
|
||||
self.on_create()
|
||||
self.renew_count += 1
|
||||
return True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def update(self, name, namespace, updated_record):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.on_try_update:
|
||||
self.on_try_update()
|
||||
if self.renew_count >= self.renew_count_max:
|
||||
return False
|
||||
|
||||
old_record = self.leader_record[0]
|
||||
self.leader_record[0] = updated_record
|
||||
|
||||
self.on_update()
|
||||
|
||||
if old_record.holder_identity != updated_record.holder_identity:
|
||||
self.on_change()
|
||||
|
||||
self.renew_count += 1
|
||||
return True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
kubernetes/base/leaderelection/leaderelectionrecord.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class LeaderElectionRecord:
    # Annotation used in the lock object
    def __init__(self, holder_identity, lease_duration, acquire_time, renew_time):
        self.holder_identity = holder_identity
        self.lease_duration = lease_duration
        self.acquire_time = acquire_time
        self.renew_time = renew_time
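For context, this record is what ConfigMapLock (below) serializes into the lock object's annotation; the JSON keys mirror the ones used by get_lock_dict in configmaplock.py. A sketch of the round trip, with illustrative timestamps:

```python
import json

from kubernetes.leaderelection.leaderelectionrecord import LeaderElectionRecord

record = LeaderElectionRecord("candidate-123", "17",
                              "2024-01-01 10:00:00", "2024-01-01 10:00:05")

# This is the value stored under the lock annotation key.
annotation = json.dumps({
    'holderIdentity': record.holder_identity,
    'leaseDurationSeconds': record.lease_duration,
    'acquireTime': record.acquire_time,
    'renewTime': record.renew_time,
})
# -> {"holderIdentity": "candidate-123", "leaseDurationSeconds": "17", ...}
```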
kubernetes/base/leaderelection/resourcelock/__init__.py (new file, 13 lines)
@@ -0,0 +1,13 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kubernetes/base/leaderelection/resourcelock/configmaplock.py (new file, 129 lines)
@@ -0,0 +1,129 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from kubernetes.client.rest import ApiException
from kubernetes import client, config
from kubernetes.client.api_client import ApiClient
from ..leaderelectionrecord import LeaderElectionRecord
import json
import logging
logging.basicConfig(level=logging.INFO)


class ConfigMapLock:
    def __init__(self, name, namespace, identity):
        """
        :param name: name of the lock
        :param namespace: namespace
        :param identity: A unique identifier that the candidate is using
        """
        self.api_instance = client.CoreV1Api()
        self.leader_electionrecord_annotationkey = 'control-plane.alpha.kubernetes.io/leader'
        self.name = name
        self.namespace = namespace
        self.identity = str(identity)
        self.configmap_reference = None
        self.lock_record = {
            'holderIdentity': None,
            'leaseDurationSeconds': None,
            'acquireTime': None,
            'renewTime': None
        }

    # get returns the election record from a ConfigMap annotation
    def get(self, name, namespace):
        """
        :param name: Name of the configmap object to read
        :param namespace: Namespace in which the configmap object is to be searched
        :return: 'True, election record' if the object is found, else 'False, exception response'
        """
        try:
            api_response = self.api_instance.read_namespaced_config_map(name, namespace)

            # If no annotations exist, add the leader_electionrecord_annotationkey
            annotations = api_response.metadata.annotations
            if annotations is None or annotations == '':
                api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''}
                self.configmap_reference = api_response
                return True, None

            # If annotations exist but the leader_electionrecord_annotationkey does not, add it as a key
            if not annotations.get(self.leader_electionrecord_annotationkey):
                api_response.metadata.annotations = {self.leader_electionrecord_annotationkey: ''}
                self.configmap_reference = api_response
                return True, None

            lock_record = self.get_lock_object(json.loads(annotations[self.leader_electionrecord_annotationkey]))

            self.configmap_reference = api_response
            return True, lock_record
        except ApiException as e:
            return False, e

    def create(self, name, namespace, election_record):
        """
        :param name: Name of the configmap object to be created
        :param namespace: Namespace in which the configmap object is to be created
        :param election_record: Election record to store in the annotation
        :return: 'True' if the object is created, else 'False' if it failed
        """
        body = client.V1ConfigMap(
            metadata={"name": name,
                      "annotations": {self.leader_electionrecord_annotationkey: json.dumps(self.get_lock_dict(election_record))}})

        try:
            api_response = self.api_instance.create_namespaced_config_map(namespace, body, pretty=True)
            return True
        except ApiException as e:
            logging.info("Failed to create lock as {}".format(e))
            return False

    def update(self, name, namespace, updated_record):
        """
        :param name: name of the lock to be updated
        :param namespace: namespace the lock is in
        :param updated_record: the updated election record
        :return: True if the update is successful, False if it fails
        """
        try:
            # Set the updated record
            self.configmap_reference.metadata.annotations[self.leader_electionrecord_annotationkey] = json.dumps(self.get_lock_dict(updated_record))
            api_response = self.api_instance.replace_namespaced_config_map(name=name, namespace=namespace,
                                                                           body=self.configmap_reference)
            return True
        except ApiException as e:
            logging.info("Failed to update lock as {}".format(e))
            return False

    def get_lock_object(self, lock_record):
        leader_election_record = LeaderElectionRecord(None, None, None, None)

        if lock_record.get('holderIdentity'):
            leader_election_record.holder_identity = lock_record['holderIdentity']
        if lock_record.get('leaseDurationSeconds'):
            leader_election_record.lease_duration = lock_record['leaseDurationSeconds']
        if lock_record.get('acquireTime'):
            leader_election_record.acquire_time = lock_record['acquireTime']
        if lock_record.get('renewTime'):
            leader_election_record.renew_time = lock_record['renewTime']

        return leader_election_record

    def get_lock_dict(self, leader_election_record):
        self.lock_record['holderIdentity'] = leader_election_record.holder_identity
        self.lock_record['leaseDurationSeconds'] = leader_election_record.lease_duration
        self.lock_record['acquireTime'] = leader_election_record.acquire_time
        self.lock_record['renewTime'] = leader_election_record.renew_time

        return self.lock_record
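To see who currently holds a lock created by this class, the annotation can be read back directly. A hedged sketch that assumes a kubeconfig-authenticated client and the lock name/namespace from example.py:

```python
import json
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()

# The lock is an ordinary ConfigMap; the election record lives in its annotation.
cm = v1.read_namespaced_config_map("examplepython", "default")
raw = cm.metadata.annotations['control-plane.alpha.kubernetes.io/leader']
record = json.loads(raw)
print("leader:", record['holderIdentity'], "renewed at:", record['renewTime'])
```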
kubernetes/base/run_tox.sh (new executable file, 53 lines)
@@ -0,0 +1,53 @@
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

RUNNING_DIR=$(pwd)
TMP_DIR=$(mktemp -d)

function cleanup()
{
    cd "${RUNNING_DIR}"
}
trap cleanup EXIT SIGINT


SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")
pushd "${SCRIPT_ROOT}" > /dev/null
SCRIPT_ROOT=`pwd`
popd > /dev/null

cd "${TMP_DIR}"
git clone https://github.com/kubernetes-client/python.git
cd python
git config user.email "kubernetes-client@k8s.com"
git config user.name "kubernetes client"
git rm -rf kubernetes/base
git commit -m "DO NOT MERGE, removing submodule for testing only"
mkdir kubernetes/base
cp -r "${SCRIPT_ROOT}/." kubernetes/base
rm -rf kubernetes/base/.git
rm -rf kubernetes/base/.tox
git add kubernetes/base
git commit -m "DO NOT MERGE, adding changes for testing."
git status

echo "Running tox from the main repo on $TOXENV environment"
# Run the user-provided command.
"${@}"
kubernetes/base/stream/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .stream import stream, portforward
kubernetes/base/stream/stream.py (new file, 50 lines)
@@ -0,0 +1,50 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools

from . import ws_client


def _websocket_request(websocket_request, force_kwargs, api_method, *args, **kwargs):
    """Override the ApiClient.request method with an alternative websocket based
    method and call the supplied Kubernetes API method with that in place."""
    if force_kwargs:
        for kwarg, value in force_kwargs.items():
            kwargs[kwarg] = value
    api_client = api_method.__self__.api_client
    # Older generated code's api client has `config`; newer code has `configuration`
    try:
        configuration = api_client.configuration
    except AttributeError:
        configuration = api_client.config
    prev_request = api_client.request
    binary = kwargs.pop('binary', False)
    try:
        api_client.request = functools.partial(websocket_request, configuration, binary=binary)
        out = api_method(*args, **kwargs)
        # The api_client insists on converting this to a string using its representation, so we have
        # to do this dance to strip it of the b' prefix and ' suffix, encode it byte-per-byte (latin1),
        # escape all of the unicode \x*'s, then encode it back byte-by-byte.
        # However, if _preload_content=False is passed, then the entire WSClient is returned instead
        # of a response, and we want to leave it alone
        if binary and kwargs.get('_preload_content', True):
            out = out[2:-1].encode('latin1').decode('unicode_escape').encode('latin1')
        return out
    finally:
        api_client.request = prev_request


stream = functools.partial(_websocket_request, ws_client.websocket_call, None)
portforward = functools.partial(_websocket_request, ws_client.portforward_call, {'_preload_content': False})
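The stream helper is typically used to wrap the exec and attach calls of CoreV1Api. A short usage sketch; the pod name and namespace are placeholders, and the import path assumes the usual kubernetes.stream alias for this vendored package:

```python
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()
api = client.CoreV1Api()

# Run a one-shot command inside a pod; stream() swaps in the websocket-based
# request method for the duration of the call and returns the captured output.
output = stream(api.connect_get_namespaced_pod_exec,
                name="my-pod", namespace="default",
                command=["/bin/sh", "-c", "echo hello"],
                stderr=True, stdin=False, stdout=True, tty=False)
print(output)
```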
kubernetes/base/stream/ws_client.py (new file, 571 lines)
@@ -0,0 +1,571 @@
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import sys
|
||||
|
||||
from kubernetes.client.rest import ApiException, ApiValueError
|
||||
|
||||
import certifi
|
||||
import collections
|
||||
import select
|
||||
import socket
|
||||
import ssl
|
||||
import threading
|
||||
import time
|
||||
|
||||
import six
|
||||
import yaml
|
||||
|
||||
|
||||
from six.moves.urllib.parse import urlencode, urlparse, urlunparse
|
||||
from six import StringIO, BytesIO
|
||||
|
||||
from websocket import WebSocket, ABNF, enableTrace
|
||||
from base64 import urlsafe_b64decode
|
||||
from requests.utils import should_bypass_proxies
|
||||
|
||||
STDIN_CHANNEL = 0
|
||||
STDOUT_CHANNEL = 1
|
||||
STDERR_CHANNEL = 2
|
||||
ERROR_CHANNEL = 3
|
||||
RESIZE_CHANNEL = 4
|
||||
|
||||
class _IgnoredIO:
|
||||
def write(self, _x):
|
||||
pass
|
||||
|
||||
def getvalue(self):
|
||||
raise TypeError("Tried to read_all() from a WSClient configured to not capture. Did you mean `capture_all=True`?")
|
||||
|
||||
|
||||
class WSClient:
|
||||
def __init__(self, configuration, url, headers, capture_all, binary=False):
|
||||
"""A websocket client with support for channels.
|
||||
|
||||
Exec command uses different channels for different streams. for
|
||||
example, 0 is stdin, 1 is stdout and 2 is stderr. Some other API calls
|
||||
like port forwarding can forward different pods' streams to different
|
||||
channels.
|
||||
"""
|
||||
self._connected = False
|
||||
self._channels = {}
|
||||
self.binary = binary
|
||||
self.newline = '\n' if not self.binary else b'\n'
|
||||
if capture_all:
|
||||
self._all = StringIO() if not self.binary else BytesIO()
|
||||
else:
|
||||
self._all = _IgnoredIO()
|
||||
self.sock = create_websocket(configuration, url, headers)
|
||||
self._connected = True
|
||||
self._returncode = None
|
||||
|
||||
def peek_channel(self, channel, timeout=0):
|
||||
"""Peek a channel and return part of the input,
|
||||
empty string otherwise."""
|
||||
self.update(timeout=timeout)
|
||||
if channel in self._channels:
|
||||
return self._channels[channel]
|
||||
return ""
|
||||
|
||||
def read_channel(self, channel, timeout=0):
|
||||
"""Read data from a channel."""
|
||||
if channel not in self._channels:
|
||||
ret = self.peek_channel(channel, timeout)
|
||||
else:
|
||||
ret = self._channels[channel]
|
||||
if channel in self._channels:
|
||||
del self._channels[channel]
|
||||
return ret
|
||||
|
||||
def readline_channel(self, channel, timeout=None):
|
||||
"""Read a line from a channel."""
|
||||
if timeout is None:
|
||||
timeout = float("inf")
|
||||
start = time.time()
|
||||
while self.is_open() and time.time() - start < timeout:
|
||||
if channel in self._channels:
|
||||
data = self._channels[channel]
|
||||
if self.newline in data:
|
||||
index = data.find(self.newline)
|
||||
ret = data[:index]
|
||||
data = data[index+1:]
|
||||
if data:
|
||||
self._channels[channel] = data
|
||||
else:
|
||||
del self._channels[channel]
|
||||
return ret
|
||||
self.update(timeout=(timeout - time.time() + start))
|
||||
|
||||
def write_channel(self, channel, data):
|
||||
"""Write data to a channel."""
|
||||
# check if we're writing binary data or not
|
||||
binary = six.PY3 and type(data) == six.binary_type
|
||||
opcode = ABNF.OPCODE_BINARY if binary else ABNF.OPCODE_TEXT
|
||||
|
||||
channel_prefix = chr(channel)
|
||||
if binary:
|
||||
channel_prefix = six.binary_type(channel_prefix, "ascii")
|
||||
|
||||
payload = channel_prefix + data
|
||||
self.sock.send(payload, opcode=opcode)
|
||||
|
||||
def peek_stdout(self, timeout=0):
|
||||
"""Same as peek_channel with channel=1."""
|
||||
return self.peek_channel(STDOUT_CHANNEL, timeout=timeout)
|
||||
|
||||
def read_stdout(self, timeout=None):
|
||||
"""Same as read_channel with channel=1."""
|
||||
return self.read_channel(STDOUT_CHANNEL, timeout=timeout)
|
||||
|
||||
def readline_stdout(self, timeout=None):
|
||||
"""Same as readline_channel with channel=1."""
|
||||
return self.readline_channel(STDOUT_CHANNEL, timeout=timeout)
|
||||
|
||||
def peek_stderr(self, timeout=0):
|
||||
"""Same as peek_channel with channel=2."""
|
||||
return self.peek_channel(STDERR_CHANNEL, timeout=timeout)
|
||||
|
||||
def read_stderr(self, timeout=None):
|
||||
"""Same as read_channel with channel=2."""
|
||||
return self.read_channel(STDERR_CHANNEL, timeout=timeout)
|
||||
|
||||
def readline_stderr(self, timeout=None):
|
||||
"""Same as readline_channel with channel=2."""
|
||||
return self.readline_channel(STDERR_CHANNEL, timeout=timeout)
|
||||
|
||||
def read_all(self):
|
||||
"""Return buffered data received on stdout and stderr channels.
|
||||
This is useful for non-interactive call where a set of command passed
|
||||
to the API call and their result is needed after the call is concluded.
|
||||
Should be called after run_forever() or update()
|
||||
|
||||
TODO: Maybe we can process this and return a more meaningful map with
|
||||
channels mapped for each input.
|
||||
"""
|
||||
out = self._all.getvalue()
|
||||
self._all = self._all.__class__()
|
||||
self._channels = {}
|
||||
return out
|
||||
|
||||
def is_open(self):
|
||||
"""True if the connection is still alive."""
|
||||
return self._connected
|
||||
|
||||
def write_stdin(self, data):
|
||||
"""The same as write_channel with channel=0."""
|
||||
self.write_channel(STDIN_CHANNEL, data)
|
||||
|
||||
def update(self, timeout=0):
|
||||
"""Update channel buffers with at most one complete frame of input."""
|
||||
if not self.is_open():
|
||||
return
|
||||
if not self.sock.connected:
|
||||
self._connected = False
|
||||
return
|
||||
|
||||
# The options here are:
|
||||
# select.select() - this will work on most OS, however, it has a
|
||||
# limitation of only able to read fd numbers up to 1024.
|
||||
# i.e. does not scale well. This was the original
|
||||
# implementation.
|
||||
# select.poll() - this will work on most unix based OS, but not as
|
||||
# efficient as epoll. Will work for fd numbers above 1024.
|
||||
# select.epoll() - newest and most efficient way of polling.
|
||||
# However, only works on linux.
|
||||
if hasattr(select, "poll"):
|
||||
poll = select.poll()
|
||||
poll.register(self.sock.sock, select.POLLIN)
|
||||
if timeout is not None:
|
||||
timeout *= 1_000 # poll method uses milliseconds as the time unit
|
||||
r = poll.poll(timeout)
|
||||
poll.unregister(self.sock.sock)
|
||||
else:
|
||||
r, _, _ = select.select(
|
||||
(self.sock.sock, ), (), (), timeout)
|
||||
|
||||
if r:
|
||||
op_code, frame = self.sock.recv_data_frame(True)
|
||||
if op_code == ABNF.OPCODE_CLOSE:
|
||||
self._connected = False
|
||||
return
|
||||
elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:
|
||||
data = frame.data
|
||||
if six.PY3 and not self.binary:
|
||||
data = data.decode("utf-8", "replace")
|
||||
if len(data) > 1:
|
||||
channel = data[0]
|
||||
if six.PY3 and not self.binary:
|
||||
channel = ord(channel)
|
||||
data = data[1:]
|
||||
if data:
|
||||
if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
|
||||
# keeping all messages in the order they received
|
||||
# for non-blocking call.
|
||||
self._all.write(data)
|
||||
if channel not in self._channels:
|
||||
self._channels[channel] = data
|
||||
else:
|
||||
self._channels[channel] += data
|
||||
|
||||
def run_forever(self, timeout=None):
|
||||
"""Wait till connection is closed or timeout reached. Buffer any input
|
||||
received during this time."""
|
||||
if timeout:
|
||||
start = time.time()
|
||||
while self.is_open() and time.time() - start < timeout:
|
||||
self.update(timeout=(timeout - time.time() + start))
|
||||
else:
|
||||
while self.is_open():
|
||||
self.update(timeout=None)
|
||||
@property
|
||||
def returncode(self):
|
||||
"""
|
||||
The return code, A None value indicates that the process hasn't
|
||||
terminated yet.
|
||||
"""
|
||||
if self.is_open():
|
||||
return None
|
||||
else:
|
||||
if self._returncode is None:
|
||||
err = self.read_channel(ERROR_CHANNEL)
|
||||
err = yaml.safe_load(err)
|
||||
if err['status'] == "Success":
|
||||
self._returncode = 0
|
||||
else:
|
||||
self._returncode = int(err['details']['causes'][0]['message'])
|
||||
return self._returncode
|
||||
|
||||
def close(self, **kwargs):
|
||||
"""
|
||||
close websocket connection.
|
||||
"""
|
||||
self._connected = False
|
||||
if self.sock:
|
||||
self.sock.close(**kwargs)
|
||||
|
||||
|
||||
WSResponse = collections.namedtuple('WSResponse', ['data'])
|
||||
|
||||
|
||||
class PortForward:
|
||||
def __init__(self, websocket, ports):
|
||||
"""A websocket client with support for port forwarding.
|
||||
|
||||
Port Forward command sends on 2 channels per port, a read/write
|
||||
data channel and a read only error channel. Both channels are sent an
|
||||
initial frame containing the port number that channel is associated with.
|
||||
"""
|
||||
|
||||
self.websocket = websocket
|
||||
self.local_ports = {}
|
||||
for ix, port_number in enumerate(ports):
|
||||
self.local_ports[port_number] = self._Port(ix, port_number)
|
||||
# There is a thread run per PortForward instance which performs the translation between the
|
||||
# raw socket data sent by the python application and the websocket protocol. This thread
|
||||
# terminates after either side has closed all ports, and after flushing all pending data.
|
||||
proxy = threading.Thread(
|
||||
name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]),
|
||||
target=self._proxy
|
||||
)
|
||||
proxy.daemon = True
|
||||
proxy.start()
|
||||
|
||||
@property
|
||||
def connected(self):
|
||||
return self.websocket.connected
|
||||
|
||||
def socket(self, port_number):
|
||||
if port_number not in self.local_ports:
|
||||
raise ValueError("Invalid port number")
|
||||
return self.local_ports[port_number].socket
|
||||
|
||||
def error(self, port_number):
|
||||
if port_number not in self.local_ports:
|
||||
raise ValueError("Invalid port number")
|
||||
return self.local_ports[port_number].error
|
||||
|
||||
def close(self):
|
||||
for port in self.local_ports.values():
|
||||
port.socket.close()
|
||||
|
||||
class _Port:
|
||||
def __init__(self, ix, port_number):
|
||||
# The remote port number
|
||||
self.port_number = port_number
|
||||
# The websocket channel byte number for this port
|
||||
self.channel = six.int2byte(ix * 2)
|
||||
# A socket pair is created to provide a means of translating the data flow
|
||||
# between the python application and the kubernetes websocket. The self.python
|
||||
# half of the socket pair is used by the _proxy method to receive and send data
|
||||
# to the running python application.
|
||||
s, self.python = socket.socketpair()
|
||||
# The self.socket half of the pair is used by the python application to send
|
||||
# and receive data to the eventual pod port. It is wrapped in the _Socket class
|
||||
# because a socket pair is an AF_UNIX socket, not a AF_INET socket. This allows
|
||||
# intercepting setting AF_INET socket options that would error against an AF_UNIX
|
||||
# socket.
|
||||
self.socket = self._Socket(s)
|
||||
# Data accumulated from the websocket to be sent to the python application.
|
||||
self.data = b''
|
||||
# All data sent from kubernetes on the port error channel.
|
||||
self.error = None
|
||||
|
||||
class _Socket:
|
||||
def __init__(self, socket):
|
||||
self._socket = socket
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._socket, name)
|
||||
|
||||
def setsockopt(self, level, optname, value):
|
||||
# The following socket option is not valid with a socket created from socketpair,
|
||||
# and is set by the http.client.HTTPConnection.connect method.
|
||||
if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY:
|
||||
return
|
||||
self._socket.setsockopt(level, optname, value)
|
||||
|
||||
# Proxy all socket data between the python code and the kubernetes websocket.
|
||||
def _proxy(self):
|
||||
channel_ports = []
|
||||
channel_initialized = []
|
||||
local_ports = {}
|
||||
for port in self.local_ports.values():
|
||||
# Setup the data channel for this port number
|
||||
channel_ports.append(port)
|
||||
channel_initialized.append(False)
|
||||
# Setup the error channel for this port number
|
||||
channel_ports.append(port)
|
||||
channel_initialized.append(False)
|
||||
port.python.setblocking(True)
|
||||
local_ports[port.python] = port
|
||||
# The data to send on the websocket socket
|
||||
kubernetes_data = b''
|
||||
while True:
|
||||
rlist = [] # List of sockets to read from
|
||||
wlist = [] # List of sockets to write to
|
||||
if self.websocket.connected:
|
||||
rlist.append(self.websocket)
|
||||
if kubernetes_data:
|
||||
wlist.append(self.websocket)
|
||||
local_all_closed = True
|
||||
for port in self.local_ports.values():
|
||||
if port.python.fileno() != -1:
|
||||
if self.websocket.connected:
|
||||
rlist.append(port.python)
|
||||
if port.data:
|
||||
wlist.append(port.python)
|
||||
local_all_closed = False
|
||||
else:
|
||||
if port.data:
|
||||
wlist.append(port.python)
|
||||
local_all_closed = False
|
||||
else:
|
||||
port.python.close()
|
||||
if local_all_closed and not (self.websocket.connected and kubernetes_data):
|
||||
self.websocket.close()
|
||||
return
|
||||
r, w, _ = select.select(rlist, wlist, [])
|
||||
for sock in r:
|
||||
if sock == self.websocket:
|
||||
pending = True
|
||||
while pending:
|
||||
opcode, frame = self.websocket.recv_data_frame(True)
|
||||
if opcode == ABNF.OPCODE_BINARY:
|
||||
if not frame.data:
|
||||
raise RuntimeError("Unexpected frame data size")
|
||||
channel = six.byte2int(frame.data)
|
||||
if channel >= len(channel_ports):
|
||||
raise RuntimeError("Unexpected channel number: %s" % channel)
|
||||
port = channel_ports[channel]
|
||||
if channel_initialized[channel]:
|
||||
if channel % 2:
|
||||
if port.error is None:
|
||||
port.error = ''
|
||||
port.error += frame.data[1:].decode()
|
||||
port.python.close()
|
||||
else:
|
||||
port.data += frame.data[1:]
|
||||
else:
|
||||
if len(frame.data) != 3:
|
||||
raise RuntimeError(
|
||||
"Unexpected initial channel frame data size"
|
||||
)
|
||||
port_number = six.byte2int(frame.data[1:2]) + (six.byte2int(frame.data[2:3]) * 256)
|
||||
if port_number != port.port_number:
|
||||
raise RuntimeError(
|
||||
"Unexpected port number in initial channel frame: %s" % port_number
|
||||
)
|
||||
channel_initialized[channel] = True
|
||||
elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE):
|
||||
raise RuntimeError("Unexpected websocket opcode: %s" % opcode)
|
||||
if not (isinstance(self.websocket.sock, ssl.SSLSocket) and self.websocket.sock.pending()):
|
||||
pending = False
|
||||
else:
|
||||
port = local_ports[sock]
|
||||
if port.python.fileno() != -1:
|
||||
data = port.python.recv(1024 * 1024)
|
||||
if data:
|
||||
kubernetes_data += ABNF.create_frame(
|
||||
port.channel + data,
|
||||
ABNF.OPCODE_BINARY,
|
||||
).format()
|
||||
else:
|
||||
port.python.close()
|
||||
for sock in w:
|
||||
if sock == self.websocket:
|
||||
sent = self.websocket.sock.send(kubernetes_data)
|
||||
kubernetes_data = kubernetes_data[sent:]
|
||||
else:
|
||||
port = local_ports[sock]
|
||||
if port.python.fileno() != -1:
|
||||
sent = port.python.send(port.data)
|
||||
port.data = port.data[sent:]
|
||||
|
||||
|
||||
def get_websocket_url(url, query_params=None):
|
||||
parsed_url = urlparse(url)
|
||||
parts = list(parsed_url)
|
||||
if parsed_url.scheme == 'http':
|
||||
parts[0] = 'ws'
|
||||
elif parsed_url.scheme == 'https':
|
||||
parts[0] = 'wss'
|
||||
if query_params:
|
||||
query = []
|
||||
for key, value in query_params:
|
||||
if key == 'command' and isinstance(value, list):
|
||||
for command in value:
|
||||
query.append((key, command))
|
||||
else:
|
||||
query.append((key, value))
|
||||
if query:
|
||||
parts[4] = urlencode(query)
|
||||
return urlunparse(parts)
|
||||
|
||||
|
||||
def create_websocket(configuration, url, headers=None):
    enableTrace(False)

    # We just need to pass the Authorization, ignore all the other
    # http headers we get from the generated code
    header = []
    if headers and 'authorization' in headers:
        header.append("authorization: %s" % headers['authorization'])
    if headers and 'sec-websocket-protocol' in headers:
        header.append("sec-websocket-protocol: %s" %
                      headers['sec-websocket-protocol'])
    else:
        header.append("sec-websocket-protocol: v4.channel.k8s.io")

    if url.startswith('wss://') and configuration.verify_ssl:
        ssl_opts = {
            'cert_reqs': ssl.CERT_REQUIRED,
            'ca_certs': configuration.ssl_ca_cert or certifi.where(),
        }
        if configuration.assert_hostname is not None:
            ssl_opts['check_hostname'] = configuration.assert_hostname
    else:
        ssl_opts = {'cert_reqs': ssl.CERT_NONE}

    if configuration.cert_file:
        ssl_opts['certfile'] = configuration.cert_file
    if configuration.key_file:
        ssl_opts['keyfile'] = configuration.key_file
    if configuration.tls_server_name:
        ssl_opts['server_hostname'] = configuration.tls_server_name

    websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False)
    connect_opt = {
        'header': header
    }

    if configuration.proxy or configuration.proxy_headers:
        connect_opt = websocket_proxycare(connect_opt, configuration, url, headers)

    websocket.connect(url, **connect_opt)
    return websocket

def websocket_proxycare(connect_opt, configuration, url, headers):
    """ An internal function to be called in api-client when a websocket
        create is requested.
    """
    if configuration.no_proxy:
        connect_opt.update({'http_no_proxy': configuration.no_proxy.split(',')})

    if configuration.proxy:
        proxy_url = urlparse(configuration.proxy)
        connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port})
        if configuration.proxy_headers:
            for key, value in configuration.proxy_headers.items():
                if key == 'proxy-authorization' and value.startswith('Basic'):
                    b64value = value.split()[1]
                    auth = urlsafe_b64decode(b64value).decode().split(':')
                    connect_opt.update({'http_proxy_auth': (auth[0], auth[1])})
    return connect_opt

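A hedged usage sketch (not part of the file) showing how proxy settings on a Configuration end up as websocket-client connect options; the expected dictionary mirrors ws_client_test.py below.

# --- illustration only, not part of ws_client.py ---
import urllib3
from kubernetes.client.configuration import Configuration

config = Configuration()
config.proxy = 'http://proxy.example.com:8080/'          # example values
config.proxy_headers = urllib3.util.make_headers(proxy_basic_auth='user:pass')
config.no_proxy = 'localhost,.example.com'

opts = websocket_proxycare({}, config, None, None)
# opts == {'http_no_proxy': ['localhost', '.example.com'],
#          'http_proxy_host': 'proxy.example.com',
#          'http_proxy_port': 8080,
#          'http_proxy_auth': ('user', 'pass')}
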
def websocket_call(configuration, _method, url, **kwargs):
    """An internal function to be called in api-client when a websocket
    connection is required. method, url, and kwargs are the parameters of
    apiClient.request method."""

    url = get_websocket_url(url, kwargs.get("query_params"))
    headers = kwargs.get("headers")
    _request_timeout = kwargs.get("_request_timeout", 60)
    _preload_content = kwargs.get("_preload_content", True)
    capture_all = kwargs.get("capture_all", True)
    binary = kwargs.get('binary', False)
    try:
        client = WSClient(configuration, url, headers, capture_all, binary=binary)
        if not _preload_content:
            return client
        client.run_forever(timeout=_request_timeout)
        all = client.read_all()
        if binary:
            return WSResponse(all)
        else:
            return WSResponse('%s' % ''.join(all))
    except (Exception, KeyboardInterrupt, SystemExit) as e:
        raise ApiException(status=0, reason=str(e))

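websocket_call is normally reached through the kubernetes.stream.stream helper rather than called directly; a hedged sketch, assuming this vendored tree is importable as the regular kubernetes package and a kubeconfig is available:

# --- illustration only, not part of ws_client.py ---
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()
v1 = client.CoreV1Api()

# 'my-pod' / 'default' are placeholder names.
out = stream(v1.connect_get_namespaced_pod_exec, 'my-pod', 'default',
             command=['/bin/sh', '-c', 'echo hello'],
             stderr=True, stdin=False, stdout=True, tty=False)
print(out)  # "hello\n"
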
def portforward_call(configuration, _method, url, **kwargs):
    """An internal function to be called in api-client when a websocket
    connection is required for port forwarding. args and kwargs are the
    parameters of apiClient.request method."""

    query_params = kwargs.get("query_params")

    ports = []
    for param, value in query_params:
        if param == 'ports':
            for port in value.split(','):
                try:
                    port_number = int(port)
                except ValueError:
                    raise ApiValueError("Invalid port number: %s" % port)
                if not (0 < port_number < 65536):
                    raise ApiValueError("Port number must be between 0 and 65536: %s" % port)
                if port_number in ports:
                    raise ApiValueError("Duplicate port numbers: %s" % port)
                ports.append(port_number)
    if not ports:
        raise ApiValueError("Missing required parameter `ports`")

    url = get_websocket_url(url, query_params)
    headers = kwargs.get("headers")

    try:
        websocket = create_websocket(configuration, url, headers)
        return PortForward(websocket, ports)
    except (Exception, KeyboardInterrupt, SystemExit) as e:
        raise ApiException(status=0, reason=str(e))

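Likewise, portforward_call backs the kubernetes.stream.portforward helper; a hedged sketch of the typical flow (placeholder pod and namespace names, assumes the vendored tree is importable as the kubernetes package):

# --- illustration only, not part of ws_client.py ---
from kubernetes import client, config
from kubernetes.stream import portforward

config.load_kube_config()
v1 = client.CoreV1Api()

pf = portforward(v1.connect_get_namespaced_pod_portforward,
                 'my-pod', 'default', ports='80')
sock = pf.socket(80)           # socket-like object proxied over the websocket
sock.setblocking(True)
sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
print(sock.recv(4096))
sock.close()
print(pf.error(80))            # None unless the pod reported a forwarding error
pf.close()
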
76
kubernetes/base/stream/ws_client_test.py
Normal file
@@ -0,0 +1,76 @@
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from .ws_client import get_websocket_url
|
||||
from .ws_client import websocket_proxycare
|
||||
from kubernetes.client.configuration import Configuration
|
||||
|
||||
try:
|
||||
import urllib3
|
||||
urllib3.disable_warnings()
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
def dictval(dict, key, default=None):
|
||||
try:
|
||||
val = dict[key]
|
||||
except KeyError:
|
||||
val = default
|
||||
return val
|
||||
|
||||
class WSClientTest(unittest.TestCase):
|
||||
|
||||
def test_websocket_client(self):
|
||||
for url, ws_url in [
|
||||
('http://localhost/api', 'ws://localhost/api'),
|
||||
('https://localhost/api', 'wss://localhost/api'),
|
||||
('https://domain.com/api', 'wss://domain.com/api'),
|
||||
('https://api.domain.com/api', 'wss://api.domain.com/api'),
|
||||
('http://api.domain.com', 'ws://api.domain.com'),
|
||||
('https://api.domain.com', 'wss://api.domain.com'),
|
||||
('http://api.domain.com/', 'ws://api.domain.com/'),
|
||||
('https://api.domain.com/', 'wss://api.domain.com/'),
|
||||
]:
|
||||
self.assertEqual(get_websocket_url(url), ws_url)
|
||||
|
||||
def test_websocket_proxycare(self):
|
||||
for proxy, idpass, no_proxy, expect_host, expect_port, expect_auth, expect_noproxy in [
|
||||
( None, None, None, None, None, None, None ),
|
||||
( 'http://proxy.example.com:8080/', None, None, 'proxy.example.com', 8080, None, None ),
|
||||
( 'http://proxy.example.com:8080/', 'user:pass', None, 'proxy.example.com', 8080, ('user','pass'), None),
|
||||
( 'http://proxy.example.com:8080/', 'user:pass', '', 'proxy.example.com', 8080, ('user','pass'), None),
|
||||
( 'http://proxy.example.com:8080/', 'user:pass', '*', 'proxy.example.com', 8080, ('user','pass'), ['*']),
|
||||
( 'http://proxy.example.com:8080/', 'user:pass', '.example.com', 'proxy.example.com', 8080, ('user','pass'), ['.example.com']),
|
||||
( 'http://proxy.example.com:8080/', 'user:pass', 'localhost,.local,.example.com', 'proxy.example.com', 8080, ('user','pass'), ['localhost','.local','.example.com']),
|
||||
]:
|
||||
# setup input
|
||||
config = Configuration()
|
||||
if proxy is not None:
|
||||
setattr(config, 'proxy', proxy)
|
||||
if idpass is not None:
|
||||
setattr(config, 'proxy_headers', urllib3.util.make_headers(proxy_basic_auth=idpass))
|
||||
if no_proxy is not None:
|
||||
setattr(config, 'no_proxy', no_proxy)
|
||||
# setup done
|
||||
# test starts
|
||||
connect_opt = websocket_proxycare( {}, config, None, None)
|
||||
self.assertEqual( dictval(connect_opt,'http_proxy_host'), expect_host)
|
||||
self.assertEqual( dictval(connect_opt,'http_proxy_port'), expect_port)
|
||||
self.assertEqual( dictval(connect_opt,'http_proxy_auth'), expect_auth)
|
||||
self.assertEqual( dictval(connect_opt,'http_no_proxy'), expect_noproxy)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
13
kubernetes/base/tox.ini
Normal file
@@ -0,0 +1,13 @@
[tox]
skipsdist = True
envlist =
  py3{5,6,7,8,9}
  py3{5,6,7,8,9}-functional

[testenv]
passenv = TOXENV CI TRAVIS TRAVIS_*
commands =
  python -V
  pip install pytest
  ./run_tox.sh pytest

15
kubernetes/base/watch/__init__.py
Normal file
@@ -0,0 +1,15 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .watch import Watch
213
kubernetes/base/watch/watch.py
Normal file
@@ -0,0 +1,213 @@
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import pydoc
|
||||
import sys
|
||||
|
||||
from kubernetes import client
|
||||
|
||||
PYDOC_RETURN_LABEL = ":return:"
|
||||
PYDOC_FOLLOW_PARAM = ":param bool follow:"
|
||||
|
||||
# Removing this suffix from return type name should give us event's object
|
||||
# type. e.g., if list_namespaces() returns "NamespaceList" type,
|
||||
# then list_namespaces(watch=true) returns a stream of events with objects
|
||||
# of type "Namespace". In case this assumption is not true, user should
|
||||
# provide return_type to Watch class's __init__.
|
||||
TYPE_LIST_SUFFIX = "List"
|
||||
|
||||
|
||||
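A one-line illustration (not part of the file) of the suffix convention described above, matching what Watch.get_return_type() does further down:

# --- illustration only, not part of watch.py ---
return_type = "V1NamespaceList"
if return_type.endswith(TYPE_LIST_SUFFIX):
    return_type = return_type[:-len(TYPE_LIST_SUFFIX)]
assert return_type == "V1Namespace"
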
PY2 = sys.version_info[0] == 2
|
||||
if PY2:
|
||||
import httplib
|
||||
HTTP_STATUS_GONE = httplib.GONE
|
||||
else:
|
||||
import http
|
||||
HTTP_STATUS_GONE = http.HTTPStatus.GONE
|
||||
|
||||
|
||||
class SimpleNamespace:
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.__dict__.update(kwargs)
|
||||
|
||||
|
||||
def _find_return_type(func):
|
||||
for line in pydoc.getdoc(func).splitlines():
|
||||
if line.startswith(PYDOC_RETURN_LABEL):
|
||||
return line[len(PYDOC_RETURN_LABEL):].strip()
|
||||
return ""
|
||||
|
||||
|
||||
def iter_resp_lines(resp):
|
||||
buffer = bytearray()
|
||||
for segment in resp.stream(amt=None, decode_content=False):
|
||||
|
||||
# Append the segment (chunk) to the buffer
|
||||
#
|
||||
# Performance note: depending on contents of buffer and the type+value of segment,
|
||||
# encoding segment into the buffer could be a wasteful step. The approach used here
|
||||
# simplifies the logic farther down, but in the future it may be reasonable to
|
||||
# sacrifice readability for performance.
|
||||
if isinstance(segment, bytes):
|
||||
buffer.extend(segment)
|
||||
elif isinstance(segment, str):
|
||||
buffer.extend(segment.encode("utf-8"))
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Received invalid segment type, {type(segment)}, from stream. Accepts only 'str' or 'bytes'.")
|
||||
|
||||
# Split by newline (safe for utf-8 because multi-byte sequences cannot contain the newline byte)
|
||||
next_newline = buffer.find(b'\n')
|
||||
while next_newline != -1:
|
||||
# Convert bytes to a valid utf-8 string, replacing any invalid utf-8 with the '�' character
|
||||
line = buffer[:next_newline].decode(
|
||||
"utf-8", errors="replace")
|
||||
buffer = buffer[next_newline+1:]
|
||||
if line:
|
||||
yield line
|
||||
next_newline = buffer.find(b'\n')
|
||||
|
||||
|
||||
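A small sketch (not part of the file) of how iter_resp_lines reassembles chunked, mixed bytes/str segments into complete lines; the fake response below is hypothetical.

# --- illustration only, not part of watch.py ---
class FakeResp:
    # mimics the urllib3 response interface used above
    def stream(self, amt=None, decode_content=False):
        return [b'{"type": "ADDED", "obj', 'ect": {}}\n', b'\n']

lines = list(iter_resp_lines(FakeResp()))
assert lines == ['{"type": "ADDED", "object": {}}']  # blank lines are dropped
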
class Watch(object):
|
||||
|
||||
def __init__(self, return_type=None):
|
||||
self._raw_return_type = return_type
|
||||
self._stop = False
|
||||
self._api_client = client.ApiClient()
|
||||
self.resource_version = None
|
||||
|
||||
def stop(self):
|
||||
self._stop = True
|
||||
|
||||
def get_return_type(self, func):
|
||||
if self._raw_return_type:
|
||||
return self._raw_return_type
|
||||
return_type = _find_return_type(func)
|
||||
if return_type.endswith(TYPE_LIST_SUFFIX):
|
||||
return return_type[:-len(TYPE_LIST_SUFFIX)]
|
||||
return return_type
|
||||
|
||||
def get_watch_argument_name(self, func):
|
||||
if PYDOC_FOLLOW_PARAM in pydoc.getdoc(func):
|
||||
return 'follow'
|
||||
else:
|
||||
return 'watch'
|
||||
|
||||
def unmarshal_event(self, data, return_type):
|
||||
js = json.loads(data)
|
||||
js['raw_object'] = js['object']
|
||||
# BOOKMARK event is treated the same as ERROR for a quick fix of
|
||||
# decoding exception
|
||||
# TODO: make use of the resource_version in BOOKMARK event for more
|
||||
# efficient WATCH
|
||||
if return_type and js['type'] != 'ERROR' and js['type'] != 'BOOKMARK':
|
||||
obj = SimpleNamespace(data=json.dumps(js['raw_object']))
|
||||
js['object'] = self._api_client.deserialize(obj, return_type)
|
||||
if hasattr(js['object'], 'metadata'):
|
||||
self.resource_version = js['object'].metadata.resource_version
|
||||
# For custom objects that we don't have model defined, json
|
||||
# deserialization results in dictionary
|
||||
elif (isinstance(js['object'], dict) and 'metadata' in js['object']
|
||||
and 'resourceVersion' in js['object']['metadata']):
|
||||
self.resource_version = js['object']['metadata'][
|
||||
'resourceVersion']
|
||||
return js
|
||||
|
||||
def stream(self, func, *args, **kwargs):
|
||||
"""Watch an API resource and stream the result back via a generator.
|
||||
|
||||
Note that watching an API resource can expire. The method tries to
|
||||
resume automatically once from the last result, but if that last result
|
||||
is too old as well, an `ApiException` exception will be thrown with
|
||||
``code`` 410. In that case you have to recover yourself, probably
|
||||
by listing the API resource to obtain the latest state and then
|
||||
watching from that state on by setting ``resource_version`` to
|
||||
one returned from listing.
|
||||
|
||||
:param func: The API function pointer. Any parameter to the function
|
||||
can be passed after this parameter.
|
||||
|
||||
:return: Event object with these keys:
|
||||
'type': The type of event such as "ADDED", "DELETED", etc.
|
||||
'raw_object': a dict representing the watched object.
|
||||
'object': A model representation of raw_object. The name of
|
||||
model will be determined based on
|
||||
the func's doc string. If it cannot be determined,
|
||||
'object' value will be the same as 'raw_object'.
|
||||
|
||||
Example:
|
||||
v1 = kubernetes.client.CoreV1Api()
|
||||
watch = kubernetes.watch.Watch()
|
||||
for e in watch.stream(v1.list_namespace, resource_version=1127):
|
||||
type_ = e['type']
|
||||
object_ = e['object'] # object is one of type return_type
|
||||
raw_object = e['raw_object'] # raw_object is a dict
|
||||
...
|
||||
if should_stop:
|
||||
watch.stop()
|
||||
"""
|
||||
|
||||
self._stop = False
|
||||
return_type = self.get_return_type(func)
|
||||
watch_arg = self.get_watch_argument_name(func)
|
||||
kwargs[watch_arg] = True
|
||||
kwargs['_preload_content'] = False
|
||||
if 'resource_version' in kwargs:
|
||||
self.resource_version = kwargs['resource_version']
|
||||
|
||||
# Do not attempt retries if user specifies a timeout.
|
||||
# We want to ensure we are returning within that timeout.
|
||||
disable_retries = ('timeout_seconds' in kwargs)
|
||||
retry_after_410 = False
|
||||
while True:
|
||||
resp = func(*args, **kwargs)
|
||||
try:
|
||||
for line in iter_resp_lines(resp):
|
||||
# unmarshal when we are receiving events from watch,
|
||||
# return raw string when we are streaming log
|
||||
if watch_arg == "watch":
|
||||
event = self.unmarshal_event(line, return_type)
|
||||
if isinstance(event, dict) \
|
||||
and event['type'] == 'ERROR':
|
||||
obj = event['raw_object']
|
||||
# Current request expired, let's retry, (if enabled)
|
||||
# but only if we have not already retried.
|
||||
if not disable_retries and not retry_after_410 and \
|
||||
obj['code'] == HTTP_STATUS_GONE:
|
||||
retry_after_410 = True
|
||||
break
|
||||
else:
|
||||
reason = "%s: %s" % (
|
||||
obj['reason'], obj['message'])
|
||||
raise client.rest.ApiException(
|
||||
status=obj['code'], reason=reason)
|
||||
else:
|
||||
retry_after_410 = False
|
||||
yield event
|
||||
else:
|
||||
yield line
|
||||
if self._stop:
|
||||
break
|
||||
finally:
|
||||
resp.close()
|
||||
resp.release_conn()
|
||||
if self.resource_version is not None:
|
||||
kwargs['resource_version'] = self.resource_version
|
||||
else:
|
||||
self._stop = True
|
||||
|
||||
if self._stop or disable_retries:
|
||||
break
|
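To close out watch.py, a hedged sketch of the 410-recovery pattern described in the stream() docstring: re-list to obtain a fresh resourceVersion when the watch has expired (assumes a kubeconfig is available).

# --- illustration only, not part of watch.py ---
from kubernetes import client, config, watch

config.load_kube_config()
v1 = client.CoreV1Api()
w = watch.Watch()

resource_version = None
while True:
    try:
        kwargs = {'resource_version': resource_version} if resource_version else {}
        for event in w.stream(v1.list_namespace, **kwargs):
            resource_version = event['object'].metadata.resource_version
            print(event['type'], event['object'].metadata.name)
    except client.rest.ApiException as e:
        if e.status != 410:
            raise
        # watch expired: re-list and resume from the latest version
        resource_version = v1.list_namespace().metadata.resource_version
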
494
kubernetes/base/watch/watch_test.py
Normal file
@@ -0,0 +1,494 @@
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
|
||||
from unittest.mock import Mock, call
|
||||
|
||||
from kubernetes import client
|
||||
|
||||
from .watch import Watch
|
||||
|
||||
|
||||
class WatchTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# counter for a test that needs test global state
|
||||
self.callcount = 0
|
||||
|
||||
def test_watch_with_decode(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
|
||||
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
|
||||
'"resourceVersion": "2"}, "spec": {}, "sta',
|
||||
'tus": {}}}\n'
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
|
||||
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n',
|
||||
'should_not_happened\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_namespaces = Mock(return_value=fake_resp)
|
||||
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
|
||||
|
||||
w = Watch()
|
||||
count = 1
|
||||
for e in w.stream(fake_api.get_namespaces):
|
||||
self.assertEqual("ADDED", e['type'])
|
||||
# make sure decoder worked and we got a model with the right name
|
||||
self.assertEqual("test%d" % count, e['object'].metadata.name)
|
||||
# make sure decoder worked and updated Watch.resource_version
|
||||
self.assertEqual(
|
||||
"%d" % count, e['object'].metadata.resource_version)
|
||||
self.assertEqual("%d" % count, w.resource_version)
|
||||
count += 1
|
||||
# make sure we can stop the watch and the last event won't be
# returned
|
||||
if count == 4:
|
||||
w.stop()
|
||||
|
||||
# make sure that all three records were consumed by the stream
|
||||
self.assertEqual(4, count)
|
||||
|
||||
fake_api.get_namespaces.assert_called_once_with(
|
||||
_preload_content=False, watch=True)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
def test_watch_with_interspersed_newlines(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'\n',
|
||||
'{"type": "ADDED", "object": {"metadata":',
|
||||
'{"name": "test1","resourceVersion": "1"}}}\n{"type": "ADDED", ',
|
||||
'"object": {"metadata": {"name": "test2", "resourceVersion": "2"}}}\n',
|
||||
'\n',
|
||||
'',
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test3", "resourceVersion": "3"}}}\n',
|
||||
'\n\n\n',
|
||||
'\n',
|
||||
])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_namespaces = Mock(return_value=fake_resp)
|
||||
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
|
||||
|
||||
w = Watch()
|
||||
count = 0
|
||||
|
||||
# Consume all test events from the mock service, stopping when no more data is available.
|
||||
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
|
||||
# the only way to do so. Without that, the stream will re-read the test data forever.
|
||||
for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
|
||||
count += 1
|
||||
self.assertEqual("test%d" % count, e['object'].metadata.name)
|
||||
self.assertEqual(3, count)
|
||||
|
||||
def test_watch_with_multibyte_utf8(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
# two-byte utf-8 character
|
||||
'{"type":"MODIFIED","object":{"data":{"utf-8":"© 1"},"metadata":{"name":"test1","resourceVersion":"1"}}}\n',
|
||||
# same copyright character expressed as bytes
|
||||
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2\xA9 2"},"metadata":{"name":"test2","resourceVersion":"2"}}}\n'
|
||||
# same copyright character with bytes split across two stream chunks
|
||||
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2',
|
||||
b'\xA9 3"},"metadata":{"n',
|
||||
# more chunks of the same event, sent as a mix of bytes and strings
|
||||
'ame":"test3","resourceVersion":"3"',
|
||||
'}}}',
|
||||
b'\n'
|
||||
])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_configmaps = Mock(return_value=fake_resp)
|
||||
fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList'
|
||||
|
||||
w = Watch()
|
||||
count = 0
|
||||
|
||||
# Consume all test events from the mock service, stopping when no more data is available.
|
||||
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
|
||||
# the only way to do so. Without that, the stream will re-read the test data forever.
|
||||
for event in w.stream(fake_api.get_configmaps, timeout_seconds=1):
|
||||
count += 1
|
||||
self.assertEqual("MODIFIED", event['type'])
|
||||
self.assertEqual("test%d" % count, event['object'].metadata.name)
|
||||
self.assertEqual("© %d" % count, event['object'].data["utf-8"])
|
||||
self.assertEqual(
|
||||
"%d" % count, event['object'].metadata.resource_version)
|
||||
self.assertEqual("%d" % count, w.resource_version)
|
||||
self.assertEqual(3, count)
|
||||
|
||||
def test_watch_with_invalid_utf8(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
# test 1 uses 1 invalid utf-8 byte
|
||||
# test 2 uses a sequence of 2 invalid utf-8 bytes
|
||||
# test 3 uses a sequence of 3 invalid utf-8 bytes
|
||||
return_value=[
|
||||
# utf-8 sequence for 😄 is \xF0\x9F\x98\x84
|
||||
# all other sequences below are invalid
|
||||
# ref: https://www.w3.org/2001/06/utf-8-wrong/UTF-8-test.html
|
||||
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 1","invalid":"\x80 1"},"metadata":{"name":"test1"}}}\n',
|
||||
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 2","invalid":"\xC0\xAF 2"},"metadata":{"name":"test2"}}}\n',
|
||||
# mix bytes/strings and split byte sequences across chunks
|
||||
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98',
|
||||
b'\x84 ',
|
||||
b'',
|
||||
b'3","invalid":"\xE0\x80',
|
||||
b'\xAF ',
|
||||
'3"},"metadata":{"n',
|
||||
'ame":"test3"',
|
||||
'}}}',
|
||||
b'\n'
|
||||
])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_configmaps = Mock(return_value=fake_resp)
|
||||
fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList'
|
||||
|
||||
w = Watch()
|
||||
count = 0
|
||||
|
||||
# Consume all test events from the mock service, stopping when no more data is available.
|
||||
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
|
||||
# the only way to do so. Without that, the stream will re-read the test data forever.
|
||||
for event in w.stream(fake_api.get_configmaps, timeout_seconds=1):
|
||||
count += 1
|
||||
self.assertEqual("MODIFIED", event['type'])
|
||||
self.assertEqual("test%d" % count, event['object'].metadata.name)
|
||||
self.assertEqual("😄 %d" % count, event['object'].data["utf-8"])
|
||||
# expect N replacement characters in test N
|
||||
self.assertEqual("<EFBFBD> %d".replace('<EFBFBD>', '<EFBFBD>'*count) %
|
||||
count, event['object'].data["invalid"])
|
||||
self.assertEqual(3, count)
|
||||
|
||||
def test_watch_for_follow(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'log_line_1\n',
|
||||
'log_line_2\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.read_namespaced_pod_log = Mock(return_value=fake_resp)
|
||||
fake_api.read_namespaced_pod_log.__doc__ = ':param bool follow:\n:return: str'
|
||||
|
||||
w = Watch()
|
||||
count = 1
|
||||
for e in w.stream(fake_api.read_namespaced_pod_log):
|
||||
self.assertEqual("log_line_1", e)
|
||||
count += 1
|
||||
# make sure we can stop the watch and the last event won't be
# returned
|
||||
if count == 2:
|
||||
w.stop()
|
||||
|
||||
fake_api.read_namespaced_pod_log.assert_called_once_with(
|
||||
_preload_content=False, follow=True)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
def test_watch_resource_version_set(self):
|
||||
# https://github.com/kubernetes-client/python/issues/700
|
||||
# ensure watching from a resource version does not reset to resource
# version 0 after k8s resets the watch connection
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
values = [
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
|
||||
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
|
||||
'"resourceVersion": "2"}, "spec": {}, "sta',
|
||||
'tus": {}}}\n'
|
||||
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
|
||||
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n'
|
||||
]
|
||||
|
||||
# return nothing on the first call and values on the second
|
||||
# this emulates a watch from a rv that returns nothing in the first k8s
|
||||
# watch reset and values later
|
||||
|
||||
def get_values(*args, **kwargs):
|
||||
self.callcount += 1
|
||||
if self.callcount == 1:
|
||||
return []
|
||||
else:
|
||||
return values
|
||||
|
||||
fake_resp.stream = Mock(
|
||||
side_effect=get_values)
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_namespaces = Mock(return_value=fake_resp)
|
||||
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
|
||||
|
||||
w = Watch()
|
||||
# ensure we keep our requested resource version or the version latest
|
||||
# returned version when the existing versions are older than the
|
||||
# requested version
|
||||
# needed for the list existing objects, then watch from there use case
|
||||
calls = []
|
||||
|
||||
iterations = 2
|
||||
# first two calls must use the passed rv, the first call is a
|
||||
# "reset" and does not actually return anything
|
||||
# the second call must use the same rv but will return values
|
||||
# (with a wrong rv but a real cluster would behave correctly)
|
||||
# calls following that will use the rv from those returned values
|
||||
calls.append(call(_preload_content=False, watch=True,
|
||||
resource_version="5"))
|
||||
calls.append(call(_preload_content=False, watch=True,
|
||||
resource_version="5"))
|
||||
for i in range(iterations):
|
||||
# ideally we want 5 here but as rv must be treated as an
|
||||
# opaque value we cannot interpret it and order it so rely
|
||||
# on k8s returning the events completely and in order
|
||||
calls.append(call(_preload_content=False, watch=True,
|
||||
resource_version="3"))
|
||||
|
||||
for c, e in enumerate(w.stream(fake_api.get_namespaces,
|
||||
resource_version="5")):
|
||||
if c == len(values) * iterations:
|
||||
w.stop()
|
||||
|
||||
# check calls are in the list, gives good error output
|
||||
fake_api.get_namespaces.assert_has_calls(calls)
|
||||
# more strict test with worse error message
|
||||
self.assertEqual(fake_api.get_namespaces.mock_calls, calls)
|
||||
|
||||
def test_watch_stream_twice(self):
|
||||
w = Watch(float)
|
||||
for step in ['first', 'second']:
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=['{"type": "ADDED", "object": 1}\n'] * 4)
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_namespaces = Mock(return_value=fake_resp)
|
||||
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
|
||||
|
||||
count = 1
|
||||
for e in w.stream(fake_api.get_namespaces):
|
||||
count += 1
|
||||
if count == 3:
|
||||
w.stop()
|
||||
|
||||
self.assertEqual(count, 3)
|
||||
fake_api.get_namespaces.assert_called_once_with(
|
||||
_preload_content=False, watch=True)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
def test_watch_stream_loop(self):
|
||||
w = Watch(float)
|
||||
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=['{"type": "ADDED", "object": 1}\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_namespaces = Mock(return_value=fake_resp)
|
||||
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
|
||||
|
||||
count = 0
|
||||
|
||||
# when timeout_seconds is set, the stream exits automatically once the timeout is reached
|
||||
for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
|
||||
count = count + 1
|
||||
self.assertEqual(count, 1)
|
||||
|
||||
# without timeout_seconds, the stream only exits when w.stop() is called
|
||||
for e in w.stream(fake_api.get_namespaces):
|
||||
count = count + 1
|
||||
if count == 2:
|
||||
w.stop()
|
||||
|
||||
self.assertEqual(count, 2)
|
||||
self.assertEqual(fake_api.get_namespaces.call_count, 2)
|
||||
self.assertEqual(fake_resp.stream.call_count, 2)
|
||||
self.assertEqual(fake_resp.close.call_count, 2)
|
||||
self.assertEqual(fake_resp.release_conn.call_count, 2)
|
||||
|
||||
def test_unmarshal_with_float_object(self):
|
||||
w = Watch()
|
||||
event = w.unmarshal_event('{"type": "ADDED", "object": 1}', 'float')
|
||||
self.assertEqual("ADDED", event['type'])
|
||||
self.assertEqual(1.0, event['object'])
|
||||
self.assertTrue(isinstance(event['object'], float))
|
||||
self.assertEqual(1, event['raw_object'])
|
||||
|
||||
def test_unmarshal_with_no_return_type(self):
|
||||
w = Watch()
|
||||
event = w.unmarshal_event('{"type": "ADDED", "object": ["test1"]}',
|
||||
None)
|
||||
self.assertEqual("ADDED", event['type'])
|
||||
self.assertEqual(["test1"], event['object'])
|
||||
self.assertEqual(["test1"], event['raw_object'])
|
||||
|
||||
def test_unmarshal_with_custom_object(self):
|
||||
w = Watch()
|
||||
event = w.unmarshal_event('{"type": "ADDED", "object": {"apiVersion":'
|
||||
'"test.com/v1beta1","kind":"foo","metadata":'
|
||||
'{"name": "bar", "resourceVersion": "1"}}}',
|
||||
'object')
|
||||
self.assertEqual("ADDED", event['type'])
|
||||
# make sure decoder deserialized json into dictionary and updated
|
||||
# Watch.resource_version
|
||||
self.assertTrue(isinstance(event['object'], dict))
|
||||
self.assertEqual("1", event['object']['metadata']['resourceVersion'])
|
||||
self.assertEqual("1", w.resource_version)
|
||||
|
||||
def test_unmarshal_with_bookmark(self):
|
||||
w = Watch()
|
||||
event = w.unmarshal_event(
|
||||
'{"type":"BOOKMARK","object":{"kind":"Job","apiVersion":"batch/v1"'
|
||||
',"metadata":{"resourceVersion":"1"},"spec":{"template":{'
|
||||
'"metadata":{},"spec":{"containers":null}}},"status":{}}}',
|
||||
'V1Job')
|
||||
self.assertEqual("BOOKMARK", event['type'])
|
||||
# Watch.resource_version is *not* updated, as BOOKMARK is treated the
|
||||
# same as ERROR for a quick fix of decoding exception,
|
||||
# resource_version in BOOKMARK is *not* used at all.
|
||||
self.assertEqual(None, w.resource_version)
|
||||
|
||||
def test_watch_with_exception(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(side_effect=KeyError('expected'))
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_thing = Mock(return_value=fake_resp)
|
||||
|
||||
w = Watch()
|
||||
try:
|
||||
for _ in w.stream(fake_api.get_thing):
|
||||
self.fail(self, "Should fail on exception.")
|
||||
except KeyError:
|
||||
pass
|
||||
# expected
|
||||
|
||||
fake_api.get_thing.assert_called_once_with(
|
||||
_preload_content=False, watch=True)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
def test_watch_with_error_event(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'{"type": "ERROR", "object": {"code": 410, '
|
||||
'"reason": "Gone", "message": "error message"}}\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_thing = Mock(return_value=fake_resp)
|
||||
|
||||
w = Watch()
|
||||
# No events are generated when no initial resourceVersion is passed
|
||||
# No retry is attempted either, preventing an ApiException
|
||||
assert not list(w.stream(fake_api.get_thing))
|
||||
|
||||
fake_api.get_thing.assert_called_once_with(
|
||||
_preload_content=False, watch=True)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
def test_watch_retries_on_error_event(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'{"type": "ERROR", "object": {"code": 410, '
|
||||
'"reason": "Gone", "message": "error message"}}\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_thing = Mock(return_value=fake_resp)
|
||||
|
||||
w = Watch()
|
||||
try:
|
||||
for _ in w.stream(fake_api.get_thing, resource_version=0):
|
||||
self.fail(self, "Should fail with ApiException.")
|
||||
except client.rest.ApiException:
|
||||
pass
|
||||
|
||||
# Two calls should be expected during a retry
|
||||
fake_api.get_thing.assert_has_calls(
|
||||
[call(resource_version=0, _preload_content=False, watch=True)] * 2)
|
||||
fake_resp.stream.assert_has_calls(
|
||||
[call(amt=None, decode_content=False)] * 2)
|
||||
assert fake_resp.close.call_count == 2
|
||||
assert fake_resp.release_conn.call_count == 2
|
||||
|
||||
def test_watch_with_error_event_and_timeout_param(self):
|
||||
fake_resp = Mock()
|
||||
fake_resp.close = Mock()
|
||||
fake_resp.release_conn = Mock()
|
||||
fake_resp.stream = Mock(
|
||||
return_value=[
|
||||
'{"type": "ERROR", "object": {"code": 410, '
|
||||
'"reason": "Gone", "message": "error message"}}\n'])
|
||||
|
||||
fake_api = Mock()
|
||||
fake_api.get_thing = Mock(return_value=fake_resp)
|
||||
|
||||
w = Watch()
|
||||
try:
|
||||
for _ in w.stream(fake_api.get_thing, timeout_seconds=10):
|
||||
self.fail(self, "Should fail with ApiException.")
|
||||
except client.rest.ApiException:
|
||||
pass
|
||||
|
||||
fake_api.get_thing.assert_called_once_with(
|
||||
_preload_content=False, watch=True, timeout_seconds=10)
|
||||
fake_resp.stream.assert_called_once_with(
|
||||
amt=None, decode_content=False)
|
||||
fake_resp.close.assert_called_once()
|
||||
fake_resp.release_conn.assert_called_once()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
717
kubernetes/client/__init__.py
Normal file
@@ -0,0 +1,717 @@
# coding: utf-8
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
__version__ = "32.0.0+snapshot"
|
||||
|
||||
# import apis into sdk package
|
||||
from kubernetes.client.api.well_known_api import WellKnownApi
|
||||
from kubernetes.client.api.admissionregistration_api import AdmissionregistrationApi
|
||||
from kubernetes.client.api.admissionregistration_v1_api import AdmissionregistrationV1Api
|
||||
from kubernetes.client.api.admissionregistration_v1alpha1_api import AdmissionregistrationV1alpha1Api
|
||||
from kubernetes.client.api.admissionregistration_v1beta1_api import AdmissionregistrationV1beta1Api
|
||||
from kubernetes.client.api.apiextensions_api import ApiextensionsApi
|
||||
from kubernetes.client.api.apiextensions_v1_api import ApiextensionsV1Api
|
||||
from kubernetes.client.api.apiregistration_api import ApiregistrationApi
|
||||
from kubernetes.client.api.apiregistration_v1_api import ApiregistrationV1Api
|
||||
from kubernetes.client.api.apis_api import ApisApi
|
||||
from kubernetes.client.api.apps_api import AppsApi
|
||||
from kubernetes.client.api.apps_v1_api import AppsV1Api
|
||||
from kubernetes.client.api.authentication_api import AuthenticationApi
|
||||
from kubernetes.client.api.authentication_v1_api import AuthenticationV1Api
|
||||
from kubernetes.client.api.authentication_v1beta1_api import AuthenticationV1beta1Api
|
||||
from kubernetes.client.api.authorization_api import AuthorizationApi
|
||||
from kubernetes.client.api.authorization_v1_api import AuthorizationV1Api
|
||||
from kubernetes.client.api.autoscaling_api import AutoscalingApi
|
||||
from kubernetes.client.api.autoscaling_v1_api import AutoscalingV1Api
|
||||
from kubernetes.client.api.autoscaling_v2_api import AutoscalingV2Api
|
||||
from kubernetes.client.api.batch_api import BatchApi
|
||||
from kubernetes.client.api.batch_v1_api import BatchV1Api
|
||||
from kubernetes.client.api.certificates_api import CertificatesApi
|
||||
from kubernetes.client.api.certificates_v1_api import CertificatesV1Api
|
||||
from kubernetes.client.api.certificates_v1alpha1_api import CertificatesV1alpha1Api
|
||||
from kubernetes.client.api.coordination_api import CoordinationApi
|
||||
from kubernetes.client.api.coordination_v1_api import CoordinationV1Api
|
||||
from kubernetes.client.api.coordination_v1alpha2_api import CoordinationV1alpha2Api
|
||||
from kubernetes.client.api.core_api import CoreApi
|
||||
from kubernetes.client.api.core_v1_api import CoreV1Api
|
||||
from kubernetes.client.api.custom_objects_api import CustomObjectsApi
|
||||
from kubernetes.client.api.discovery_api import DiscoveryApi
|
||||
from kubernetes.client.api.discovery_v1_api import DiscoveryV1Api
|
||||
from kubernetes.client.api.events_api import EventsApi
|
||||
from kubernetes.client.api.events_v1_api import EventsV1Api
|
||||
from kubernetes.client.api.flowcontrol_apiserver_api import FlowcontrolApiserverApi
|
||||
from kubernetes.client.api.flowcontrol_apiserver_v1_api import FlowcontrolApiserverV1Api
|
||||
from kubernetes.client.api.internal_apiserver_api import InternalApiserverApi
|
||||
from kubernetes.client.api.internal_apiserver_v1alpha1_api import InternalApiserverV1alpha1Api
|
||||
from kubernetes.client.api.logs_api import LogsApi
|
||||
from kubernetes.client.api.networking_api import NetworkingApi
|
||||
from kubernetes.client.api.networking_v1_api import NetworkingV1Api
|
||||
from kubernetes.client.api.networking_v1beta1_api import NetworkingV1beta1Api
|
||||
from kubernetes.client.api.node_api import NodeApi
|
||||
from kubernetes.client.api.node_v1_api import NodeV1Api
|
||||
from kubernetes.client.api.openid_api import OpenidApi
|
||||
from kubernetes.client.api.policy_api import PolicyApi
|
||||
from kubernetes.client.api.policy_v1_api import PolicyV1Api
|
||||
from kubernetes.client.api.rbac_authorization_api import RbacAuthorizationApi
|
||||
from kubernetes.client.api.rbac_authorization_v1_api import RbacAuthorizationV1Api
|
||||
from kubernetes.client.api.resource_api import ResourceApi
|
||||
from kubernetes.client.api.resource_v1alpha3_api import ResourceV1alpha3Api
|
||||
from kubernetes.client.api.resource_v1beta1_api import ResourceV1beta1Api
|
||||
from kubernetes.client.api.scheduling_api import SchedulingApi
|
||||
from kubernetes.client.api.scheduling_v1_api import SchedulingV1Api
|
||||
from kubernetes.client.api.storage_api import StorageApi
|
||||
from kubernetes.client.api.storage_v1_api import StorageV1Api
|
||||
from kubernetes.client.api.storage_v1alpha1_api import StorageV1alpha1Api
|
||||
from kubernetes.client.api.storage_v1beta1_api import StorageV1beta1Api
|
||||
from kubernetes.client.api.storagemigration_api import StoragemigrationApi
|
||||
from kubernetes.client.api.storagemigration_v1alpha1_api import StoragemigrationV1alpha1Api
|
||||
from kubernetes.client.api.version_api import VersionApi
|
||||
|
||||
# import ApiClient
|
||||
from kubernetes.client.api_client import ApiClient
|
||||
from kubernetes.client.configuration import Configuration
|
||||
from kubernetes.client.exceptions import OpenApiException
|
||||
from kubernetes.client.exceptions import ApiTypeError
|
||||
from kubernetes.client.exceptions import ApiValueError
|
||||
from kubernetes.client.exceptions import ApiKeyError
|
||||
from kubernetes.client.exceptions import ApiException
|
||||
# import models into sdk package
|
||||
from kubernetes.client.models.admissionregistration_v1_service_reference import AdmissionregistrationV1ServiceReference
|
||||
from kubernetes.client.models.admissionregistration_v1_webhook_client_config import AdmissionregistrationV1WebhookClientConfig
|
||||
from kubernetes.client.models.apiextensions_v1_service_reference import ApiextensionsV1ServiceReference
|
||||
from kubernetes.client.models.apiextensions_v1_webhook_client_config import ApiextensionsV1WebhookClientConfig
|
||||
from kubernetes.client.models.apiregistration_v1_service_reference import ApiregistrationV1ServiceReference
|
||||
from kubernetes.client.models.authentication_v1_token_request import AuthenticationV1TokenRequest
|
||||
from kubernetes.client.models.core_v1_endpoint_port import CoreV1EndpointPort
|
||||
from kubernetes.client.models.core_v1_event import CoreV1Event
|
||||
from kubernetes.client.models.core_v1_event_list import CoreV1EventList
|
||||
from kubernetes.client.models.core_v1_event_series import CoreV1EventSeries
|
||||
from kubernetes.client.models.discovery_v1_endpoint_port import DiscoveryV1EndpointPort
|
||||
from kubernetes.client.models.events_v1_event import EventsV1Event
|
||||
from kubernetes.client.models.events_v1_event_list import EventsV1EventList
|
||||
from kubernetes.client.models.events_v1_event_series import EventsV1EventSeries
|
||||
from kubernetes.client.models.flowcontrol_v1_subject import FlowcontrolV1Subject
|
||||
from kubernetes.client.models.rbac_v1_subject import RbacV1Subject
|
||||
from kubernetes.client.models.storage_v1_token_request import StorageV1TokenRequest
|
||||
from kubernetes.client.models.v1_api_group import V1APIGroup
|
||||
from kubernetes.client.models.v1_api_group_list import V1APIGroupList
|
||||
from kubernetes.client.models.v1_api_resource import V1APIResource
|
||||
from kubernetes.client.models.v1_api_resource_list import V1APIResourceList
|
||||
from kubernetes.client.models.v1_api_service import V1APIService
|
||||
from kubernetes.client.models.v1_api_service_condition import V1APIServiceCondition
|
||||
from kubernetes.client.models.v1_api_service_list import V1APIServiceList
|
||||
from kubernetes.client.models.v1_api_service_spec import V1APIServiceSpec
|
||||
from kubernetes.client.models.v1_api_service_status import V1APIServiceStatus
|
||||
from kubernetes.client.models.v1_api_versions import V1APIVersions
|
||||
from kubernetes.client.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
|
||||
from kubernetes.client.models.v1_affinity import V1Affinity
|
||||
from kubernetes.client.models.v1_aggregation_rule import V1AggregationRule
|
||||
from kubernetes.client.models.v1_app_armor_profile import V1AppArmorProfile
|
||||
from kubernetes.client.models.v1_attached_volume import V1AttachedVolume
|
||||
from kubernetes.client.models.v1_audit_annotation import V1AuditAnnotation
|
||||
from kubernetes.client.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource
|
||||
from kubernetes.client.models.v1_azure_file_persistent_volume_source import V1AzureFilePersistentVolumeSource
|
||||
from kubernetes.client.models.v1_azure_file_volume_source import V1AzureFileVolumeSource
|
||||
from kubernetes.client.models.v1_binding import V1Binding
|
||||
from kubernetes.client.models.v1_bound_object_reference import V1BoundObjectReference
|
||||
from kubernetes.client.models.v1_csi_driver import V1CSIDriver
|
||||
from kubernetes.client.models.v1_csi_driver_list import V1CSIDriverList
|
||||
from kubernetes.client.models.v1_csi_driver_spec import V1CSIDriverSpec
|
||||
from kubernetes.client.models.v1_csi_node import V1CSINode
|
||||
from kubernetes.client.models.v1_csi_node_driver import V1CSINodeDriver
|
||||
from kubernetes.client.models.v1_csi_node_list import V1CSINodeList
|
||||
from kubernetes.client.models.v1_csi_node_spec import V1CSINodeSpec
|
||||
from kubernetes.client.models.v1_csi_persistent_volume_source import V1CSIPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_csi_storage_capacity import V1CSIStorageCapacity
|
||||
from kubernetes.client.models.v1_csi_storage_capacity_list import V1CSIStorageCapacityList
|
||||
from kubernetes.client.models.v1_csi_volume_source import V1CSIVolumeSource
|
||||
from kubernetes.client.models.v1_capabilities import V1Capabilities
|
||||
from kubernetes.client.models.v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
|
||||
from kubernetes.client.models.v1_certificate_signing_request import V1CertificateSigningRequest
|
||||
from kubernetes.client.models.v1_certificate_signing_request_condition import V1CertificateSigningRequestCondition
|
||||
from kubernetes.client.models.v1_certificate_signing_request_list import V1CertificateSigningRequestList
|
||||
from kubernetes.client.models.v1_certificate_signing_request_spec import V1CertificateSigningRequestSpec
|
||||
from kubernetes.client.models.v1_certificate_signing_request_status import V1CertificateSigningRequestStatus
|
||||
from kubernetes.client.models.v1_cinder_persistent_volume_source import V1CinderPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_cinder_volume_source import V1CinderVolumeSource
|
||||
from kubernetes.client.models.v1_client_ip_config import V1ClientIPConfig
|
||||
from kubernetes.client.models.v1_cluster_role import V1ClusterRole
|
||||
from kubernetes.client.models.v1_cluster_role_binding import V1ClusterRoleBinding
|
||||
from kubernetes.client.models.v1_cluster_role_binding_list import V1ClusterRoleBindingList
|
||||
from kubernetes.client.models.v1_cluster_role_list import V1ClusterRoleList
|
||||
from kubernetes.client.models.v1_cluster_trust_bundle_projection import V1ClusterTrustBundleProjection
|
||||
from kubernetes.client.models.v1_component_condition import V1ComponentCondition
|
||||
from kubernetes.client.models.v1_component_status import V1ComponentStatus
|
||||
from kubernetes.client.models.v1_component_status_list import V1ComponentStatusList
|
||||
from kubernetes.client.models.v1_condition import V1Condition
|
||||
from kubernetes.client.models.v1_config_map import V1ConfigMap
|
||||
from kubernetes.client.models.v1_config_map_env_source import V1ConfigMapEnvSource
|
||||
from kubernetes.client.models.v1_config_map_key_selector import V1ConfigMapKeySelector
|
||||
from kubernetes.client.models.v1_config_map_list import V1ConfigMapList
|
||||
from kubernetes.client.models.v1_config_map_node_config_source import V1ConfigMapNodeConfigSource
|
||||
from kubernetes.client.models.v1_config_map_projection import V1ConfigMapProjection
|
||||
from kubernetes.client.models.v1_config_map_volume_source import V1ConfigMapVolumeSource
|
||||
from kubernetes.client.models.v1_container import V1Container
|
||||
from kubernetes.client.models.v1_container_image import V1ContainerImage
|
||||
from kubernetes.client.models.v1_container_port import V1ContainerPort
|
||||
from kubernetes.client.models.v1_container_resize_policy import V1ContainerResizePolicy
|
||||
from kubernetes.client.models.v1_container_state import V1ContainerState
|
||||
from kubernetes.client.models.v1_container_state_running import V1ContainerStateRunning
|
||||
from kubernetes.client.models.v1_container_state_terminated import V1ContainerStateTerminated
|
||||
from kubernetes.client.models.v1_container_state_waiting import V1ContainerStateWaiting
|
||||
from kubernetes.client.models.v1_container_status import V1ContainerStatus
|
||||
from kubernetes.client.models.v1_container_user import V1ContainerUser
|
||||
from kubernetes.client.models.v1_controller_revision import V1ControllerRevision
|
||||
from kubernetes.client.models.v1_controller_revision_list import V1ControllerRevisionList
|
||||
from kubernetes.client.models.v1_cron_job import V1CronJob
|
||||
from kubernetes.client.models.v1_cron_job_list import V1CronJobList
|
||||
from kubernetes.client.models.v1_cron_job_spec import V1CronJobSpec
|
||||
from kubernetes.client.models.v1_cron_job_status import V1CronJobStatus
|
||||
from kubernetes.client.models.v1_cross_version_object_reference import V1CrossVersionObjectReference
|
||||
from kubernetes.client.models.v1_custom_resource_column_definition import V1CustomResourceColumnDefinition
|
||||
from kubernetes.client.models.v1_custom_resource_conversion import V1CustomResourceConversion
|
||||
from kubernetes.client.models.v1_custom_resource_definition import V1CustomResourceDefinition
|
||||
from kubernetes.client.models.v1_custom_resource_definition_condition import V1CustomResourceDefinitionCondition
|
||||
from kubernetes.client.models.v1_custom_resource_definition_list import V1CustomResourceDefinitionList
|
||||
from kubernetes.client.models.v1_custom_resource_definition_names import V1CustomResourceDefinitionNames
|
||||
from kubernetes.client.models.v1_custom_resource_definition_spec import V1CustomResourceDefinitionSpec
|
||||
from kubernetes.client.models.v1_custom_resource_definition_status import V1CustomResourceDefinitionStatus
|
||||
from kubernetes.client.models.v1_custom_resource_definition_version import V1CustomResourceDefinitionVersion
|
||||
from kubernetes.client.models.v1_custom_resource_subresource_scale import V1CustomResourceSubresourceScale
|
||||
from kubernetes.client.models.v1_custom_resource_subresources import V1CustomResourceSubresources
|
||||
from kubernetes.client.models.v1_custom_resource_validation import V1CustomResourceValidation
|
||||
from kubernetes.client.models.v1_daemon_endpoint import V1DaemonEndpoint
|
||||
from kubernetes.client.models.v1_daemon_set import V1DaemonSet
|
||||
from kubernetes.client.models.v1_daemon_set_condition import V1DaemonSetCondition
|
||||
from kubernetes.client.models.v1_daemon_set_list import V1DaemonSetList
|
||||
from kubernetes.client.models.v1_daemon_set_spec import V1DaemonSetSpec
|
||||
from kubernetes.client.models.v1_daemon_set_status import V1DaemonSetStatus
|
||||
from kubernetes.client.models.v1_daemon_set_update_strategy import V1DaemonSetUpdateStrategy
|
||||
from kubernetes.client.models.v1_delete_options import V1DeleteOptions
|
||||
from kubernetes.client.models.v1_deployment import V1Deployment
|
||||
from kubernetes.client.models.v1_deployment_condition import V1DeploymentCondition
|
||||
from kubernetes.client.models.v1_deployment_list import V1DeploymentList
|
||||
from kubernetes.client.models.v1_deployment_spec import V1DeploymentSpec
|
||||
from kubernetes.client.models.v1_deployment_status import V1DeploymentStatus
|
||||
from kubernetes.client.models.v1_deployment_strategy import V1DeploymentStrategy
|
||||
from kubernetes.client.models.v1_downward_api_projection import V1DownwardAPIProjection
|
||||
from kubernetes.client.models.v1_downward_api_volume_file import V1DownwardAPIVolumeFile
|
||||
from kubernetes.client.models.v1_downward_api_volume_source import V1DownwardAPIVolumeSource
|
||||
from kubernetes.client.models.v1_empty_dir_volume_source import V1EmptyDirVolumeSource
|
||||
from kubernetes.client.models.v1_endpoint import V1Endpoint
|
||||
from kubernetes.client.models.v1_endpoint_address import V1EndpointAddress
|
||||
from kubernetes.client.models.v1_endpoint_conditions import V1EndpointConditions
|
||||
from kubernetes.client.models.v1_endpoint_hints import V1EndpointHints
|
||||
from kubernetes.client.models.v1_endpoint_slice import V1EndpointSlice
|
||||
from kubernetes.client.models.v1_endpoint_slice_list import V1EndpointSliceList
|
||||
from kubernetes.client.models.v1_endpoint_subset import V1EndpointSubset
|
||||
from kubernetes.client.models.v1_endpoints import V1Endpoints
|
||||
from kubernetes.client.models.v1_endpoints_list import V1EndpointsList
|
||||
from kubernetes.client.models.v1_env_from_source import V1EnvFromSource
|
||||
from kubernetes.client.models.v1_env_var import V1EnvVar
|
||||
from kubernetes.client.models.v1_env_var_source import V1EnvVarSource
|
||||
from kubernetes.client.models.v1_ephemeral_container import V1EphemeralContainer
|
||||
from kubernetes.client.models.v1_ephemeral_volume_source import V1EphemeralVolumeSource
|
||||
from kubernetes.client.models.v1_event_source import V1EventSource
|
||||
from kubernetes.client.models.v1_eviction import V1Eviction
|
||||
from kubernetes.client.models.v1_exec_action import V1ExecAction
|
||||
from kubernetes.client.models.v1_exempt_priority_level_configuration import V1ExemptPriorityLevelConfiguration
|
||||
from kubernetes.client.models.v1_expression_warning import V1ExpressionWarning
|
||||
from kubernetes.client.models.v1_external_documentation import V1ExternalDocumentation
|
||||
from kubernetes.client.models.v1_fc_volume_source import V1FCVolumeSource
|
||||
from kubernetes.client.models.v1_field_selector_attributes import V1FieldSelectorAttributes
|
||||
from kubernetes.client.models.v1_field_selector_requirement import V1FieldSelectorRequirement
|
||||
from kubernetes.client.models.v1_flex_persistent_volume_source import V1FlexPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_flex_volume_source import V1FlexVolumeSource
|
||||
from kubernetes.client.models.v1_flocker_volume_source import V1FlockerVolumeSource
|
||||
from kubernetes.client.models.v1_flow_distinguisher_method import V1FlowDistinguisherMethod
|
||||
from kubernetes.client.models.v1_flow_schema import V1FlowSchema
|
||||
from kubernetes.client.models.v1_flow_schema_condition import V1FlowSchemaCondition
|
||||
from kubernetes.client.models.v1_flow_schema_list import V1FlowSchemaList
|
||||
from kubernetes.client.models.v1_flow_schema_spec import V1FlowSchemaSpec
|
||||
from kubernetes.client.models.v1_flow_schema_status import V1FlowSchemaStatus
|
||||
from kubernetes.client.models.v1_for_zone import V1ForZone
|
||||
from kubernetes.client.models.v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
|
||||
from kubernetes.client.models.v1_grpc_action import V1GRPCAction
|
||||
from kubernetes.client.models.v1_git_repo_volume_source import V1GitRepoVolumeSource
|
||||
from kubernetes.client.models.v1_glusterfs_persistent_volume_source import V1GlusterfsPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
|
||||
from kubernetes.client.models.v1_group_subject import V1GroupSubject
|
||||
from kubernetes.client.models.v1_group_version_for_discovery import V1GroupVersionForDiscovery
|
||||
from kubernetes.client.models.v1_http_get_action import V1HTTPGetAction
|
||||
from kubernetes.client.models.v1_http_header import V1HTTPHeader
|
||||
from kubernetes.client.models.v1_http_ingress_path import V1HTTPIngressPath
|
||||
from kubernetes.client.models.v1_http_ingress_rule_value import V1HTTPIngressRuleValue
|
||||
from kubernetes.client.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler
|
||||
from kubernetes.client.models.v1_horizontal_pod_autoscaler_list import V1HorizontalPodAutoscalerList
|
||||
from kubernetes.client.models.v1_horizontal_pod_autoscaler_spec import V1HorizontalPodAutoscalerSpec
|
||||
from kubernetes.client.models.v1_horizontal_pod_autoscaler_status import V1HorizontalPodAutoscalerStatus
|
||||
from kubernetes.client.models.v1_host_alias import V1HostAlias
|
||||
from kubernetes.client.models.v1_host_ip import V1HostIP
|
||||
from kubernetes.client.models.v1_host_path_volume_source import V1HostPathVolumeSource
|
||||
from kubernetes.client.models.v1_ip_block import V1IPBlock
|
||||
from kubernetes.client.models.v1_iscsi_persistent_volume_source import V1ISCSIPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_iscsi_volume_source import V1ISCSIVolumeSource
|
||||
from kubernetes.client.models.v1_image_volume_source import V1ImageVolumeSource
|
||||
from kubernetes.client.models.v1_ingress import V1Ingress
|
||||
from kubernetes.client.models.v1_ingress_backend import V1IngressBackend
|
||||
from kubernetes.client.models.v1_ingress_class import V1IngressClass
|
||||
from kubernetes.client.models.v1_ingress_class_list import V1IngressClassList
|
||||
from kubernetes.client.models.v1_ingress_class_parameters_reference import V1IngressClassParametersReference
|
||||
from kubernetes.client.models.v1_ingress_class_spec import V1IngressClassSpec
|
||||
from kubernetes.client.models.v1_ingress_list import V1IngressList
|
||||
from kubernetes.client.models.v1_ingress_load_balancer_ingress import V1IngressLoadBalancerIngress
|
||||
from kubernetes.client.models.v1_ingress_load_balancer_status import V1IngressLoadBalancerStatus
|
||||
from kubernetes.client.models.v1_ingress_port_status import V1IngressPortStatus
|
||||
from kubernetes.client.models.v1_ingress_rule import V1IngressRule
|
||||
from kubernetes.client.models.v1_ingress_service_backend import V1IngressServiceBackend
|
||||
from kubernetes.client.models.v1_ingress_spec import V1IngressSpec
|
||||
from kubernetes.client.models.v1_ingress_status import V1IngressStatus
|
||||
from kubernetes.client.models.v1_ingress_tls import V1IngressTLS
|
||||
from kubernetes.client.models.v1_json_schema_props import V1JSONSchemaProps
|
||||
from kubernetes.client.models.v1_job import V1Job
|
||||
from kubernetes.client.models.v1_job_condition import V1JobCondition
|
||||
from kubernetes.client.models.v1_job_list import V1JobList
|
||||
from kubernetes.client.models.v1_job_spec import V1JobSpec
|
||||
from kubernetes.client.models.v1_job_status import V1JobStatus
|
||||
from kubernetes.client.models.v1_job_template_spec import V1JobTemplateSpec
|
||||
from kubernetes.client.models.v1_key_to_path import V1KeyToPath
|
||||
from kubernetes.client.models.v1_label_selector import V1LabelSelector
|
||||
from kubernetes.client.models.v1_label_selector_attributes import V1LabelSelectorAttributes
|
||||
from kubernetes.client.models.v1_label_selector_requirement import V1LabelSelectorRequirement
|
||||
from kubernetes.client.models.v1_lease import V1Lease
|
||||
from kubernetes.client.models.v1_lease_list import V1LeaseList
|
||||
from kubernetes.client.models.v1_lease_spec import V1LeaseSpec
|
||||
from kubernetes.client.models.v1_lifecycle import V1Lifecycle
|
||||
from kubernetes.client.models.v1_lifecycle_handler import V1LifecycleHandler
|
||||
from kubernetes.client.models.v1_limit_range import V1LimitRange
|
||||
from kubernetes.client.models.v1_limit_range_item import V1LimitRangeItem
|
||||
from kubernetes.client.models.v1_limit_range_list import V1LimitRangeList
|
||||
from kubernetes.client.models.v1_limit_range_spec import V1LimitRangeSpec
|
||||
from kubernetes.client.models.v1_limit_response import V1LimitResponse
|
||||
from kubernetes.client.models.v1_limited_priority_level_configuration import V1LimitedPriorityLevelConfiguration
|
||||
from kubernetes.client.models.v1_linux_container_user import V1LinuxContainerUser
|
||||
from kubernetes.client.models.v1_list_meta import V1ListMeta
|
||||
from kubernetes.client.models.v1_load_balancer_ingress import V1LoadBalancerIngress
|
||||
from kubernetes.client.models.v1_load_balancer_status import V1LoadBalancerStatus
|
||||
from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference
|
||||
from kubernetes.client.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
|
||||
from kubernetes.client.models.v1_local_volume_source import V1LocalVolumeSource
|
||||
from kubernetes.client.models.v1_managed_fields_entry import V1ManagedFieldsEntry
|
||||
from kubernetes.client.models.v1_match_condition import V1MatchCondition
|
||||
from kubernetes.client.models.v1_match_resources import V1MatchResources
|
||||
from kubernetes.client.models.v1_modify_volume_status import V1ModifyVolumeStatus
|
||||
from kubernetes.client.models.v1_mutating_webhook import V1MutatingWebhook
|
||||
from kubernetes.client.models.v1_mutating_webhook_configuration import V1MutatingWebhookConfiguration
|
||||
from kubernetes.client.models.v1_mutating_webhook_configuration_list import V1MutatingWebhookConfigurationList
|
||||
from kubernetes.client.models.v1_nfs_volume_source import V1NFSVolumeSource
|
||||
from kubernetes.client.models.v1_named_rule_with_operations import V1NamedRuleWithOperations
|
||||
from kubernetes.client.models.v1_namespace import V1Namespace
|
||||
from kubernetes.client.models.v1_namespace_condition import V1NamespaceCondition
|
||||
from kubernetes.client.models.v1_namespace_list import V1NamespaceList
|
||||
from kubernetes.client.models.v1_namespace_spec import V1NamespaceSpec
|
||||
from kubernetes.client.models.v1_namespace_status import V1NamespaceStatus
|
||||
from kubernetes.client.models.v1_network_policy import V1NetworkPolicy
|
||||
from kubernetes.client.models.v1_network_policy_egress_rule import V1NetworkPolicyEgressRule
|
||||
from kubernetes.client.models.v1_network_policy_ingress_rule import V1NetworkPolicyIngressRule
|
||||
from kubernetes.client.models.v1_network_policy_list import V1NetworkPolicyList
|
||||
from kubernetes.client.models.v1_network_policy_peer import V1NetworkPolicyPeer
|
||||
from kubernetes.client.models.v1_network_policy_port import V1NetworkPolicyPort
|
||||
from kubernetes.client.models.v1_network_policy_spec import V1NetworkPolicySpec
|
||||
from kubernetes.client.models.v1_node import V1Node
|
||||
from kubernetes.client.models.v1_node_address import V1NodeAddress
|
||||
from kubernetes.client.models.v1_node_affinity import V1NodeAffinity
|
||||
from kubernetes.client.models.v1_node_condition import V1NodeCondition
|
||||
from kubernetes.client.models.v1_node_config_source import V1NodeConfigSource
|
||||
from kubernetes.client.models.v1_node_config_status import V1NodeConfigStatus
|
||||
from kubernetes.client.models.v1_node_daemon_endpoints import V1NodeDaemonEndpoints
|
||||
from kubernetes.client.models.v1_node_features import V1NodeFeatures
|
||||
from kubernetes.client.models.v1_node_list import V1NodeList
|
||||
from kubernetes.client.models.v1_node_runtime_handler import V1NodeRuntimeHandler
|
||||
from kubernetes.client.models.v1_node_runtime_handler_features import V1NodeRuntimeHandlerFeatures
|
||||
from kubernetes.client.models.v1_node_selector import V1NodeSelector
|
||||
from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
|
||||
from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm
|
||||
from kubernetes.client.models.v1_node_spec import V1NodeSpec
|
||||
from kubernetes.client.models.v1_node_status import V1NodeStatus
|
||||
from kubernetes.client.models.v1_node_system_info import V1NodeSystemInfo
|
||||
from kubernetes.client.models.v1_non_resource_attributes import V1NonResourceAttributes
|
||||
from kubernetes.client.models.v1_non_resource_policy_rule import V1NonResourcePolicyRule
|
||||
from kubernetes.client.models.v1_non_resource_rule import V1NonResourceRule
|
||||
from kubernetes.client.models.v1_object_field_selector import V1ObjectFieldSelector
|
||||
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
|
||||
from kubernetes.client.models.v1_object_reference import V1ObjectReference
|
||||
from kubernetes.client.models.v1_overhead import V1Overhead
|
||||
from kubernetes.client.models.v1_owner_reference import V1OwnerReference
|
||||
from kubernetes.client.models.v1_param_kind import V1ParamKind
|
||||
from kubernetes.client.models.v1_param_ref import V1ParamRef
|
||||
from kubernetes.client.models.v1_persistent_volume import V1PersistentVolume
|
||||
from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_condition import V1PersistentVolumeClaimCondition
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_template import V1PersistentVolumeClaimTemplate
|
||||
from kubernetes.client.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
|
||||
from kubernetes.client.models.v1_persistent_volume_list import V1PersistentVolumeList
|
||||
from kubernetes.client.models.v1_persistent_volume_spec import V1PersistentVolumeSpec
|
||||
from kubernetes.client.models.v1_persistent_volume_status import V1PersistentVolumeStatus
|
||||
from kubernetes.client.models.v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource
|
||||
from kubernetes.client.models.v1_pod import V1Pod
|
||||
from kubernetes.client.models.v1_pod_affinity import V1PodAffinity
|
||||
from kubernetes.client.models.v1_pod_affinity_term import V1PodAffinityTerm
|
||||
from kubernetes.client.models.v1_pod_anti_affinity import V1PodAntiAffinity
|
||||
from kubernetes.client.models.v1_pod_condition import V1PodCondition
|
||||
from kubernetes.client.models.v1_pod_dns_config import V1PodDNSConfig
|
||||
from kubernetes.client.models.v1_pod_dns_config_option import V1PodDNSConfigOption
|
||||
from kubernetes.client.models.v1_pod_disruption_budget import V1PodDisruptionBudget
|
||||
from kubernetes.client.models.v1_pod_disruption_budget_list import V1PodDisruptionBudgetList
|
||||
from kubernetes.client.models.v1_pod_disruption_budget_spec import V1PodDisruptionBudgetSpec
|
||||
from kubernetes.client.models.v1_pod_disruption_budget_status import V1PodDisruptionBudgetStatus
|
||||
from kubernetes.client.models.v1_pod_failure_policy import V1PodFailurePolicy
|
||||
from kubernetes.client.models.v1_pod_failure_policy_on_exit_codes_requirement import V1PodFailurePolicyOnExitCodesRequirement
|
||||
from kubernetes.client.models.v1_pod_failure_policy_on_pod_conditions_pattern import V1PodFailurePolicyOnPodConditionsPattern
|
||||
from kubernetes.client.models.v1_pod_failure_policy_rule import V1PodFailurePolicyRule
|
||||
from kubernetes.client.models.v1_pod_ip import V1PodIP
|
||||
from kubernetes.client.models.v1_pod_list import V1PodList
|
||||
from kubernetes.client.models.v1_pod_os import V1PodOS
|
||||
from kubernetes.client.models.v1_pod_readiness_gate import V1PodReadinessGate
|
||||
from kubernetes.client.models.v1_pod_resource_claim import V1PodResourceClaim
|
||||
from kubernetes.client.models.v1_pod_resource_claim_status import V1PodResourceClaimStatus
|
||||
from kubernetes.client.models.v1_pod_scheduling_gate import V1PodSchedulingGate
|
||||
from kubernetes.client.models.v1_pod_security_context import V1PodSecurityContext
|
||||
from kubernetes.client.models.v1_pod_spec import V1PodSpec
|
||||
from kubernetes.client.models.v1_pod_status import V1PodStatus
|
||||
from kubernetes.client.models.v1_pod_template import V1PodTemplate
|
||||
from kubernetes.client.models.v1_pod_template_list import V1PodTemplateList
|
||||
from kubernetes.client.models.v1_pod_template_spec import V1PodTemplateSpec
|
||||
from kubernetes.client.models.v1_policy_rule import V1PolicyRule
|
||||
from kubernetes.client.models.v1_policy_rules_with_subjects import V1PolicyRulesWithSubjects
|
||||
from kubernetes.client.models.v1_port_status import V1PortStatus
|
||||
from kubernetes.client.models.v1_portworx_volume_source import V1PortworxVolumeSource
|
||||
from kubernetes.client.models.v1_preconditions import V1Preconditions
|
||||
from kubernetes.client.models.v1_preferred_scheduling_term import V1PreferredSchedulingTerm
|
||||
from kubernetes.client.models.v1_priority_class import V1PriorityClass
|
||||
from kubernetes.client.models.v1_priority_class_list import V1PriorityClassList
|
||||
from kubernetes.client.models.v1_priority_level_configuration import V1PriorityLevelConfiguration
|
||||
from kubernetes.client.models.v1_priority_level_configuration_condition import V1PriorityLevelConfigurationCondition
|
||||
from kubernetes.client.models.v1_priority_level_configuration_list import V1PriorityLevelConfigurationList
|
||||
from kubernetes.client.models.v1_priority_level_configuration_reference import V1PriorityLevelConfigurationReference
|
||||
from kubernetes.client.models.v1_priority_level_configuration_spec import V1PriorityLevelConfigurationSpec
|
||||
from kubernetes.client.models.v1_priority_level_configuration_status import V1PriorityLevelConfigurationStatus
|
||||
from kubernetes.client.models.v1_probe import V1Probe
|
||||
from kubernetes.client.models.v1_projected_volume_source import V1ProjectedVolumeSource
|
||||
from kubernetes.client.models.v1_queuing_configuration import V1QueuingConfiguration
|
||||
from kubernetes.client.models.v1_quobyte_volume_source import V1QuobyteVolumeSource
|
||||
from kubernetes.client.models.v1_rbd_persistent_volume_source import V1RBDPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_rbd_volume_source import V1RBDVolumeSource
|
||||
from kubernetes.client.models.v1_replica_set import V1ReplicaSet
|
||||
from kubernetes.client.models.v1_replica_set_condition import V1ReplicaSetCondition
|
||||
from kubernetes.client.models.v1_replica_set_list import V1ReplicaSetList
|
||||
from kubernetes.client.models.v1_replica_set_spec import V1ReplicaSetSpec
|
||||
from kubernetes.client.models.v1_replica_set_status import V1ReplicaSetStatus
|
||||
from kubernetes.client.models.v1_replication_controller import V1ReplicationController
|
||||
from kubernetes.client.models.v1_replication_controller_condition import V1ReplicationControllerCondition
|
||||
from kubernetes.client.models.v1_replication_controller_list import V1ReplicationControllerList
|
||||
from kubernetes.client.models.v1_replication_controller_spec import V1ReplicationControllerSpec
|
||||
from kubernetes.client.models.v1_replication_controller_status import V1ReplicationControllerStatus
|
||||
from kubernetes.client.models.v1_resource_attributes import V1ResourceAttributes
|
||||
from kubernetes.client.models.v1_resource_claim import V1ResourceClaim
|
||||
from kubernetes.client.models.v1_resource_field_selector import V1ResourceFieldSelector
|
||||
from kubernetes.client.models.v1_resource_health import V1ResourceHealth
|
||||
from kubernetes.client.models.v1_resource_policy_rule import V1ResourcePolicyRule
|
||||
from kubernetes.client.models.v1_resource_quota import V1ResourceQuota
|
||||
from kubernetes.client.models.v1_resource_quota_list import V1ResourceQuotaList
|
||||
from kubernetes.client.models.v1_resource_quota_spec import V1ResourceQuotaSpec
|
||||
from kubernetes.client.models.v1_resource_quota_status import V1ResourceQuotaStatus
|
||||
from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements
|
||||
from kubernetes.client.models.v1_resource_rule import V1ResourceRule
|
||||
from kubernetes.client.models.v1_resource_status import V1ResourceStatus
|
||||
from kubernetes.client.models.v1_role import V1Role
|
||||
from kubernetes.client.models.v1_role_binding import V1RoleBinding
|
||||
from kubernetes.client.models.v1_role_binding_list import V1RoleBindingList
|
||||
from kubernetes.client.models.v1_role_list import V1RoleList
|
||||
from kubernetes.client.models.v1_role_ref import V1RoleRef
|
||||
from kubernetes.client.models.v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet
|
||||
from kubernetes.client.models.v1_rolling_update_deployment import V1RollingUpdateDeployment
|
||||
from kubernetes.client.models.v1_rolling_update_stateful_set_strategy import V1RollingUpdateStatefulSetStrategy
|
||||
from kubernetes.client.models.v1_rule_with_operations import V1RuleWithOperations
|
||||
from kubernetes.client.models.v1_runtime_class import V1RuntimeClass
|
||||
from kubernetes.client.models.v1_runtime_class_list import V1RuntimeClassList
|
||||
from kubernetes.client.models.v1_se_linux_options import V1SELinuxOptions
|
||||
from kubernetes.client.models.v1_scale import V1Scale
|
||||
from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
|
||||
from kubernetes.client.models.v1_scale_spec import V1ScaleSpec
|
||||
from kubernetes.client.models.v1_scale_status import V1ScaleStatus
|
||||
from kubernetes.client.models.v1_scheduling import V1Scheduling
|
||||
from kubernetes.client.models.v1_scope_selector import V1ScopeSelector
|
||||
from kubernetes.client.models.v1_scoped_resource_selector_requirement import V1ScopedResourceSelectorRequirement
|
||||
from kubernetes.client.models.v1_seccomp_profile import V1SeccompProfile
|
||||
from kubernetes.client.models.v1_secret import V1Secret
|
||||
from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource
|
||||
from kubernetes.client.models.v1_secret_key_selector import V1SecretKeySelector
|
||||
from kubernetes.client.models.v1_secret_list import V1SecretList
|
||||
from kubernetes.client.models.v1_secret_projection import V1SecretProjection
|
||||
from kubernetes.client.models.v1_secret_reference import V1SecretReference
|
||||
from kubernetes.client.models.v1_secret_volume_source import V1SecretVolumeSource
|
||||
from kubernetes.client.models.v1_security_context import V1SecurityContext
|
||||
from kubernetes.client.models.v1_selectable_field import V1SelectableField
|
||||
from kubernetes.client.models.v1_self_subject_access_review import V1SelfSubjectAccessReview
|
||||
from kubernetes.client.models.v1_self_subject_access_review_spec import V1SelfSubjectAccessReviewSpec
|
||||
from kubernetes.client.models.v1_self_subject_review import V1SelfSubjectReview
|
||||
from kubernetes.client.models.v1_self_subject_review_status import V1SelfSubjectReviewStatus
|
||||
from kubernetes.client.models.v1_self_subject_rules_review import V1SelfSubjectRulesReview
|
||||
from kubernetes.client.models.v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
|
||||
from kubernetes.client.models.v1_server_address_by_client_cidr import V1ServerAddressByClientCIDR
|
||||
from kubernetes.client.models.v1_service import V1Service
|
||||
from kubernetes.client.models.v1_service_account import V1ServiceAccount
|
||||
from kubernetes.client.models.v1_service_account_list import V1ServiceAccountList
|
||||
from kubernetes.client.models.v1_service_account_subject import V1ServiceAccountSubject
|
||||
from kubernetes.client.models.v1_service_account_token_projection import V1ServiceAccountTokenProjection
|
||||
from kubernetes.client.models.v1_service_backend_port import V1ServiceBackendPort
|
||||
from kubernetes.client.models.v1_service_list import V1ServiceList
|
||||
from kubernetes.client.models.v1_service_port import V1ServicePort
|
||||
from kubernetes.client.models.v1_service_spec import V1ServiceSpec
|
||||
from kubernetes.client.models.v1_service_status import V1ServiceStatus
|
||||
from kubernetes.client.models.v1_session_affinity_config import V1SessionAffinityConfig
|
||||
from kubernetes.client.models.v1_sleep_action import V1SleepAction
|
||||
from kubernetes.client.models.v1_stateful_set import V1StatefulSet
|
||||
from kubernetes.client.models.v1_stateful_set_condition import V1StatefulSetCondition
|
||||
from kubernetes.client.models.v1_stateful_set_list import V1StatefulSetList
|
||||
from kubernetes.client.models.v1_stateful_set_ordinals import V1StatefulSetOrdinals
|
||||
from kubernetes.client.models.v1_stateful_set_persistent_volume_claim_retention_policy import V1StatefulSetPersistentVolumeClaimRetentionPolicy
|
||||
from kubernetes.client.models.v1_stateful_set_spec import V1StatefulSetSpec
|
||||
from kubernetes.client.models.v1_stateful_set_status import V1StatefulSetStatus
|
||||
from kubernetes.client.models.v1_stateful_set_update_strategy import V1StatefulSetUpdateStrategy
|
||||
from kubernetes.client.models.v1_status import V1Status
|
||||
from kubernetes.client.models.v1_status_cause import V1StatusCause
|
||||
from kubernetes.client.models.v1_status_details import V1StatusDetails
|
||||
from kubernetes.client.models.v1_storage_class import V1StorageClass
|
||||
from kubernetes.client.models.v1_storage_class_list import V1StorageClassList
|
||||
from kubernetes.client.models.v1_storage_os_persistent_volume_source import V1StorageOSPersistentVolumeSource
|
||||
from kubernetes.client.models.v1_storage_os_volume_source import V1StorageOSVolumeSource
|
||||
from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
|
||||
from kubernetes.client.models.v1_subject_access_review_spec import V1SubjectAccessReviewSpec
|
||||
from kubernetes.client.models.v1_subject_access_review_status import V1SubjectAccessReviewStatus
|
||||
from kubernetes.client.models.v1_subject_rules_review_status import V1SubjectRulesReviewStatus
|
||||
from kubernetes.client.models.v1_success_policy import V1SuccessPolicy
|
||||
from kubernetes.client.models.v1_success_policy_rule import V1SuccessPolicyRule
|
||||
from kubernetes.client.models.v1_sysctl import V1Sysctl
|
||||
from kubernetes.client.models.v1_tcp_socket_action import V1TCPSocketAction
|
||||
from kubernetes.client.models.v1_taint import V1Taint
|
||||
from kubernetes.client.models.v1_token_request_spec import V1TokenRequestSpec
|
||||
from kubernetes.client.models.v1_token_request_status import V1TokenRequestStatus
|
||||
from kubernetes.client.models.v1_token_review import V1TokenReview
|
||||
from kubernetes.client.models.v1_token_review_spec import V1TokenReviewSpec
|
||||
from kubernetes.client.models.v1_token_review_status import V1TokenReviewStatus
|
||||
from kubernetes.client.models.v1_toleration import V1Toleration
|
||||
from kubernetes.client.models.v1_topology_selector_label_requirement import V1TopologySelectorLabelRequirement
|
||||
from kubernetes.client.models.v1_topology_selector_term import V1TopologySelectorTerm
|
||||
from kubernetes.client.models.v1_topology_spread_constraint import V1TopologySpreadConstraint
|
||||
from kubernetes.client.models.v1_type_checking import V1TypeChecking
|
||||
from kubernetes.client.models.v1_typed_local_object_reference import V1TypedLocalObjectReference
|
||||
from kubernetes.client.models.v1_typed_object_reference import V1TypedObjectReference
|
||||
from kubernetes.client.models.v1_uncounted_terminated_pods import V1UncountedTerminatedPods
|
||||
from kubernetes.client.models.v1_user_info import V1UserInfo
|
||||
from kubernetes.client.models.v1_user_subject import V1UserSubject
|
||||
from kubernetes.client.models.v1_validating_admission_policy import V1ValidatingAdmissionPolicy
|
||||
from kubernetes.client.models.v1_validating_admission_policy_binding import V1ValidatingAdmissionPolicyBinding
|
||||
from kubernetes.client.models.v1_validating_admission_policy_binding_list import V1ValidatingAdmissionPolicyBindingList
|
||||
from kubernetes.client.models.v1_validating_admission_policy_binding_spec import V1ValidatingAdmissionPolicyBindingSpec
|
||||
from kubernetes.client.models.v1_validating_admission_policy_list import V1ValidatingAdmissionPolicyList
|
||||
from kubernetes.client.models.v1_validating_admission_policy_spec import V1ValidatingAdmissionPolicySpec
|
||||
from kubernetes.client.models.v1_validating_admission_policy_status import V1ValidatingAdmissionPolicyStatus
|
||||
from kubernetes.client.models.v1_validating_webhook import V1ValidatingWebhook
|
||||
from kubernetes.client.models.v1_validating_webhook_configuration import V1ValidatingWebhookConfiguration
|
||||
from kubernetes.client.models.v1_validating_webhook_configuration_list import V1ValidatingWebhookConfigurationList
|
||||
from kubernetes.client.models.v1_validation import V1Validation
|
||||
from kubernetes.client.models.v1_validation_rule import V1ValidationRule
|
||||
from kubernetes.client.models.v1_variable import V1Variable
|
||||
from kubernetes.client.models.v1_volume import V1Volume
|
||||
from kubernetes.client.models.v1_volume_attachment import V1VolumeAttachment
|
||||
from kubernetes.client.models.v1_volume_attachment_list import V1VolumeAttachmentList
|
||||
from kubernetes.client.models.v1_volume_attachment_source import V1VolumeAttachmentSource
|
||||
from kubernetes.client.models.v1_volume_attachment_spec import V1VolumeAttachmentSpec
|
||||
from kubernetes.client.models.v1_volume_attachment_status import V1VolumeAttachmentStatus
|
||||
from kubernetes.client.models.v1_volume_device import V1VolumeDevice
|
||||
from kubernetes.client.models.v1_volume_error import V1VolumeError
|
||||
from kubernetes.client.models.v1_volume_mount import V1VolumeMount
|
||||
from kubernetes.client.models.v1_volume_mount_status import V1VolumeMountStatus
|
||||
from kubernetes.client.models.v1_volume_node_affinity import V1VolumeNodeAffinity
|
||||
from kubernetes.client.models.v1_volume_node_resources import V1VolumeNodeResources
|
||||
from kubernetes.client.models.v1_volume_projection import V1VolumeProjection
|
||||
from kubernetes.client.models.v1_volume_resource_requirements import V1VolumeResourceRequirements
|
||||
from kubernetes.client.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource
|
||||
from kubernetes.client.models.v1_watch_event import V1WatchEvent
|
||||
from kubernetes.client.models.v1_webhook_conversion import V1WebhookConversion
|
||||
from kubernetes.client.models.v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
|
||||
from kubernetes.client.models.v1_windows_security_context_options import V1WindowsSecurityContextOptions
|
||||
from kubernetes.client.models.v1alpha1_apply_configuration import V1alpha1ApplyConfiguration
|
||||
from kubernetes.client.models.v1alpha1_cluster_trust_bundle import V1alpha1ClusterTrustBundle
|
||||
from kubernetes.client.models.v1alpha1_cluster_trust_bundle_list import V1alpha1ClusterTrustBundleList
|
||||
from kubernetes.client.models.v1alpha1_cluster_trust_bundle_spec import V1alpha1ClusterTrustBundleSpec
|
||||
from kubernetes.client.models.v1alpha1_group_version_resource import V1alpha1GroupVersionResource
|
||||
from kubernetes.client.models.v1alpha1_json_patch import V1alpha1JSONPatch
|
||||
from kubernetes.client.models.v1alpha1_match_condition import V1alpha1MatchCondition
|
||||
from kubernetes.client.models.v1alpha1_match_resources import V1alpha1MatchResources
|
||||
from kubernetes.client.models.v1alpha1_migration_condition import V1alpha1MigrationCondition
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy import V1alpha1MutatingAdmissionPolicy
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy_binding import V1alpha1MutatingAdmissionPolicyBinding
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy_binding_list import V1alpha1MutatingAdmissionPolicyBindingList
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy_binding_spec import V1alpha1MutatingAdmissionPolicyBindingSpec
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy_list import V1alpha1MutatingAdmissionPolicyList
|
||||
from kubernetes.client.models.v1alpha1_mutating_admission_policy_spec import V1alpha1MutatingAdmissionPolicySpec
|
||||
from kubernetes.client.models.v1alpha1_mutation import V1alpha1Mutation
|
||||
from kubernetes.client.models.v1alpha1_named_rule_with_operations import V1alpha1NamedRuleWithOperations
|
||||
from kubernetes.client.models.v1alpha1_param_kind import V1alpha1ParamKind
|
||||
from kubernetes.client.models.v1alpha1_param_ref import V1alpha1ParamRef
|
||||
from kubernetes.client.models.v1alpha1_server_storage_version import V1alpha1ServerStorageVersion
|
||||
from kubernetes.client.models.v1alpha1_storage_version import V1alpha1StorageVersion
|
||||
from kubernetes.client.models.v1alpha1_storage_version_condition import V1alpha1StorageVersionCondition
|
||||
from kubernetes.client.models.v1alpha1_storage_version_list import V1alpha1StorageVersionList
|
||||
from kubernetes.client.models.v1alpha1_storage_version_migration import V1alpha1StorageVersionMigration
|
||||
from kubernetes.client.models.v1alpha1_storage_version_migration_list import V1alpha1StorageVersionMigrationList
|
||||
from kubernetes.client.models.v1alpha1_storage_version_migration_spec import V1alpha1StorageVersionMigrationSpec
|
||||
from kubernetes.client.models.v1alpha1_storage_version_migration_status import V1alpha1StorageVersionMigrationStatus
|
||||
from kubernetes.client.models.v1alpha1_storage_version_status import V1alpha1StorageVersionStatus
|
||||
from kubernetes.client.models.v1alpha1_variable import V1alpha1Variable
|
||||
from kubernetes.client.models.v1alpha1_volume_attributes_class import V1alpha1VolumeAttributesClass
|
||||
from kubernetes.client.models.v1alpha1_volume_attributes_class_list import V1alpha1VolumeAttributesClassList
|
||||
from kubernetes.client.models.v1alpha2_lease_candidate import V1alpha2LeaseCandidate
|
||||
from kubernetes.client.models.v1alpha2_lease_candidate_list import V1alpha2LeaseCandidateList
|
||||
from kubernetes.client.models.v1alpha2_lease_candidate_spec import V1alpha2LeaseCandidateSpec
|
||||
from kubernetes.client.models.v1alpha3_allocated_device_status import V1alpha3AllocatedDeviceStatus
|
||||
from kubernetes.client.models.v1alpha3_allocation_result import V1alpha3AllocationResult
|
||||
from kubernetes.client.models.v1alpha3_basic_device import V1alpha3BasicDevice
|
||||
from kubernetes.client.models.v1alpha3_cel_device_selector import V1alpha3CELDeviceSelector
|
||||
from kubernetes.client.models.v1alpha3_device import V1alpha3Device
|
||||
from kubernetes.client.models.v1alpha3_device_allocation_configuration import V1alpha3DeviceAllocationConfiguration
|
||||
from kubernetes.client.models.v1alpha3_device_allocation_result import V1alpha3DeviceAllocationResult
|
||||
from kubernetes.client.models.v1alpha3_device_attribute import V1alpha3DeviceAttribute
|
||||
from kubernetes.client.models.v1alpha3_device_claim import V1alpha3DeviceClaim
|
||||
from kubernetes.client.models.v1alpha3_device_claim_configuration import V1alpha3DeviceClaimConfiguration
|
||||
from kubernetes.client.models.v1alpha3_device_class import V1alpha3DeviceClass
|
||||
from kubernetes.client.models.v1alpha3_device_class_configuration import V1alpha3DeviceClassConfiguration
|
||||
from kubernetes.client.models.v1alpha3_device_class_list import V1alpha3DeviceClassList
|
||||
from kubernetes.client.models.v1alpha3_device_class_spec import V1alpha3DeviceClassSpec
|
||||
from kubernetes.client.models.v1alpha3_device_constraint import V1alpha3DeviceConstraint
|
||||
from kubernetes.client.models.v1alpha3_device_request import V1alpha3DeviceRequest
|
||||
from kubernetes.client.models.v1alpha3_device_request_allocation_result import V1alpha3DeviceRequestAllocationResult
|
||||
from kubernetes.client.models.v1alpha3_device_selector import V1alpha3DeviceSelector
|
||||
from kubernetes.client.models.v1alpha3_network_device_data import V1alpha3NetworkDeviceData
|
||||
from kubernetes.client.models.v1alpha3_opaque_device_configuration import V1alpha3OpaqueDeviceConfiguration
|
||||
from kubernetes.client.models.v1alpha3_resource_claim import V1alpha3ResourceClaim
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_consumer_reference import V1alpha3ResourceClaimConsumerReference
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_list import V1alpha3ResourceClaimList
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_spec import V1alpha3ResourceClaimSpec
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_status import V1alpha3ResourceClaimStatus
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_template import V1alpha3ResourceClaimTemplate
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_template_list import V1alpha3ResourceClaimTemplateList
|
||||
from kubernetes.client.models.v1alpha3_resource_claim_template_spec import V1alpha3ResourceClaimTemplateSpec
|
||||
from kubernetes.client.models.v1alpha3_resource_pool import V1alpha3ResourcePool
|
||||
from kubernetes.client.models.v1alpha3_resource_slice import V1alpha3ResourceSlice
|
||||
from kubernetes.client.models.v1alpha3_resource_slice_list import V1alpha3ResourceSliceList
|
||||
from kubernetes.client.models.v1alpha3_resource_slice_spec import V1alpha3ResourceSliceSpec
|
||||
from kubernetes.client.models.v1beta1_allocated_device_status import V1beta1AllocatedDeviceStatus
|
||||
from kubernetes.client.models.v1beta1_allocation_result import V1beta1AllocationResult
|
||||
from kubernetes.client.models.v1beta1_audit_annotation import V1beta1AuditAnnotation
|
||||
from kubernetes.client.models.v1beta1_basic_device import V1beta1BasicDevice
|
||||
from kubernetes.client.models.v1beta1_cel_device_selector import V1beta1CELDeviceSelector
|
||||
from kubernetes.client.models.v1beta1_device import V1beta1Device
|
||||
from kubernetes.client.models.v1beta1_device_allocation_configuration import V1beta1DeviceAllocationConfiguration
|
||||
from kubernetes.client.models.v1beta1_device_allocation_result import V1beta1DeviceAllocationResult
|
||||
from kubernetes.client.models.v1beta1_device_attribute import V1beta1DeviceAttribute
|
||||
from kubernetes.client.models.v1beta1_device_capacity import V1beta1DeviceCapacity
|
||||
from kubernetes.client.models.v1beta1_device_claim import V1beta1DeviceClaim
|
||||
from kubernetes.client.models.v1beta1_device_claim_configuration import V1beta1DeviceClaimConfiguration
|
||||
from kubernetes.client.models.v1beta1_device_class import V1beta1DeviceClass
|
||||
from kubernetes.client.models.v1beta1_device_class_configuration import V1beta1DeviceClassConfiguration
|
||||
from kubernetes.client.models.v1beta1_device_class_list import V1beta1DeviceClassList
|
||||
from kubernetes.client.models.v1beta1_device_class_spec import V1beta1DeviceClassSpec
|
||||
from kubernetes.client.models.v1beta1_device_constraint import V1beta1DeviceConstraint
|
||||
from kubernetes.client.models.v1beta1_device_request import V1beta1DeviceRequest
|
||||
from kubernetes.client.models.v1beta1_device_request_allocation_result import V1beta1DeviceRequestAllocationResult
|
||||
from kubernetes.client.models.v1beta1_device_selector import V1beta1DeviceSelector
|
||||
from kubernetes.client.models.v1beta1_expression_warning import V1beta1ExpressionWarning
|
||||
from kubernetes.client.models.v1beta1_ip_address import V1beta1IPAddress
|
||||
from kubernetes.client.models.v1beta1_ip_address_list import V1beta1IPAddressList
|
||||
from kubernetes.client.models.v1beta1_ip_address_spec import V1beta1IPAddressSpec
|
||||
from kubernetes.client.models.v1beta1_match_condition import V1beta1MatchCondition
|
||||
from kubernetes.client.models.v1beta1_match_resources import V1beta1MatchResources
|
||||
from kubernetes.client.models.v1beta1_named_rule_with_operations import V1beta1NamedRuleWithOperations
|
||||
from kubernetes.client.models.v1beta1_network_device_data import V1beta1NetworkDeviceData
|
||||
from kubernetes.client.models.v1beta1_opaque_device_configuration import V1beta1OpaqueDeviceConfiguration
|
||||
from kubernetes.client.models.v1beta1_param_kind import V1beta1ParamKind
|
||||
from kubernetes.client.models.v1beta1_param_ref import V1beta1ParamRef
|
||||
from kubernetes.client.models.v1beta1_parent_reference import V1beta1ParentReference
|
||||
from kubernetes.client.models.v1beta1_resource_claim import V1beta1ResourceClaim
|
||||
from kubernetes.client.models.v1beta1_resource_claim_consumer_reference import V1beta1ResourceClaimConsumerReference
|
||||
from kubernetes.client.models.v1beta1_resource_claim_list import V1beta1ResourceClaimList
|
||||
from kubernetes.client.models.v1beta1_resource_claim_spec import V1beta1ResourceClaimSpec
|
||||
from kubernetes.client.models.v1beta1_resource_claim_status import V1beta1ResourceClaimStatus
|
||||
from kubernetes.client.models.v1beta1_resource_claim_template import V1beta1ResourceClaimTemplate
|
||||
from kubernetes.client.models.v1beta1_resource_claim_template_list import V1beta1ResourceClaimTemplateList
|
||||
from kubernetes.client.models.v1beta1_resource_claim_template_spec import V1beta1ResourceClaimTemplateSpec
|
||||
from kubernetes.client.models.v1beta1_resource_pool import V1beta1ResourcePool
|
||||
from kubernetes.client.models.v1beta1_resource_slice import V1beta1ResourceSlice
|
||||
from kubernetes.client.models.v1beta1_resource_slice_list import V1beta1ResourceSliceList
|
||||
from kubernetes.client.models.v1beta1_resource_slice_spec import V1beta1ResourceSliceSpec
|
||||
from kubernetes.client.models.v1beta1_self_subject_review import V1beta1SelfSubjectReview
|
||||
from kubernetes.client.models.v1beta1_self_subject_review_status import V1beta1SelfSubjectReviewStatus
|
||||
from kubernetes.client.models.v1beta1_service_cidr import V1beta1ServiceCIDR
|
||||
from kubernetes.client.models.v1beta1_service_cidr_list import V1beta1ServiceCIDRList
|
||||
from kubernetes.client.models.v1beta1_service_cidr_spec import V1beta1ServiceCIDRSpec
|
||||
from kubernetes.client.models.v1beta1_service_cidr_status import V1beta1ServiceCIDRStatus
|
||||
from kubernetes.client.models.v1beta1_type_checking import V1beta1TypeChecking
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy import V1beta1ValidatingAdmissionPolicy
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_binding import V1beta1ValidatingAdmissionPolicyBinding
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_binding_list import V1beta1ValidatingAdmissionPolicyBindingList
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_binding_spec import V1beta1ValidatingAdmissionPolicyBindingSpec
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_list import V1beta1ValidatingAdmissionPolicyList
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_spec import V1beta1ValidatingAdmissionPolicySpec
|
||||
from kubernetes.client.models.v1beta1_validating_admission_policy_status import V1beta1ValidatingAdmissionPolicyStatus
|
||||
from kubernetes.client.models.v1beta1_validation import V1beta1Validation
|
||||
from kubernetes.client.models.v1beta1_variable import V1beta1Variable
|
||||
from kubernetes.client.models.v1beta1_volume_attributes_class import V1beta1VolumeAttributesClass
|
||||
from kubernetes.client.models.v1beta1_volume_attributes_class_list import V1beta1VolumeAttributesClassList
|
||||
from kubernetes.client.models.v2_container_resource_metric_source import V2ContainerResourceMetricSource
|
||||
from kubernetes.client.models.v2_container_resource_metric_status import V2ContainerResourceMetricStatus
|
||||
from kubernetes.client.models.v2_cross_version_object_reference import V2CrossVersionObjectReference
|
||||
from kubernetes.client.models.v2_external_metric_source import V2ExternalMetricSource
|
||||
from kubernetes.client.models.v2_external_metric_status import V2ExternalMetricStatus
|
||||
from kubernetes.client.models.v2_hpa_scaling_policy import V2HPAScalingPolicy
|
||||
from kubernetes.client.models.v2_hpa_scaling_rules import V2HPAScalingRules
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler import V2HorizontalPodAutoscaler
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler_behavior import V2HorizontalPodAutoscalerBehavior
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler_condition import V2HorizontalPodAutoscalerCondition
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler_list import V2HorizontalPodAutoscalerList
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler_spec import V2HorizontalPodAutoscalerSpec
|
||||
from kubernetes.client.models.v2_horizontal_pod_autoscaler_status import V2HorizontalPodAutoscalerStatus
|
||||
from kubernetes.client.models.v2_metric_identifier import V2MetricIdentifier
|
||||
from kubernetes.client.models.v2_metric_spec import V2MetricSpec
|
||||
from kubernetes.client.models.v2_metric_status import V2MetricStatus
|
||||
from kubernetes.client.models.v2_metric_target import V2MetricTarget
|
||||
from kubernetes.client.models.v2_metric_value_status import V2MetricValueStatus
|
||||
from kubernetes.client.models.v2_object_metric_source import V2ObjectMetricSource
|
||||
from kubernetes.client.models.v2_object_metric_status import V2ObjectMetricStatus
|
||||
from kubernetes.client.models.v2_pods_metric_source import V2PodsMetricSource
|
||||
from kubernetes.client.models.v2_pods_metric_status import V2PodsMetricStatus
|
||||
from kubernetes.client.models.v2_resource_metric_source import V2ResourceMetricSource
|
||||
from kubernetes.client.models.v2_resource_metric_status import V2ResourceMetricStatus
|
||||
from kubernetes.client.models.version_info import VersionInfo
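A minimal sketch of how these generated model classes are typically composed, assuming the vendored copy keeps the upstream keyword constructors (V1Pod, V1ObjectMeta and V1PodSpec are re-exported above; V1Container comes from the same models package; all names and values below are placeholders):

from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from kubernetes.client.models.v1_pod_spec import V1PodSpec
from kubernetes.client.models.v1_container import V1Container

# Model attributes are the snake_case form of the Kubernetes field names;
# "demo" and "nginx:1.27" are placeholder values, not taken from this repo.
pod = V1Pod(
    api_version="v1",
    kind="Pod",
    metadata=V1ObjectMeta(name="demo", namespace="default"),
    spec=V1PodSpec(containers=[V1Container(name="app", image="nginx:1.27")]),
)
print(pod.metadata.name)  # -> demo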
67
kubernetes/client/api/__init__.py
Normal file
@@ -0,0 +1,67 @@
from __future__ import absolute_import

# flake8: noqa

# import apis into api package
from kubernetes.client.api.well_known_api import WellKnownApi
from kubernetes.client.api.admissionregistration_api import AdmissionregistrationApi
from kubernetes.client.api.admissionregistration_v1_api import AdmissionregistrationV1Api
from kubernetes.client.api.admissionregistration_v1alpha1_api import AdmissionregistrationV1alpha1Api
from kubernetes.client.api.admissionregistration_v1beta1_api import AdmissionregistrationV1beta1Api
from kubernetes.client.api.apiextensions_api import ApiextensionsApi
from kubernetes.client.api.apiextensions_v1_api import ApiextensionsV1Api
from kubernetes.client.api.apiregistration_api import ApiregistrationApi
from kubernetes.client.api.apiregistration_v1_api import ApiregistrationV1Api
from kubernetes.client.api.apis_api import ApisApi
from kubernetes.client.api.apps_api import AppsApi
from kubernetes.client.api.apps_v1_api import AppsV1Api
from kubernetes.client.api.authentication_api import AuthenticationApi
from kubernetes.client.api.authentication_v1_api import AuthenticationV1Api
from kubernetes.client.api.authentication_v1beta1_api import AuthenticationV1beta1Api
from kubernetes.client.api.authorization_api import AuthorizationApi
from kubernetes.client.api.authorization_v1_api import AuthorizationV1Api
from kubernetes.client.api.autoscaling_api import AutoscalingApi
from kubernetes.client.api.autoscaling_v1_api import AutoscalingV1Api
from kubernetes.client.api.autoscaling_v2_api import AutoscalingV2Api
from kubernetes.client.api.batch_api import BatchApi
from kubernetes.client.api.batch_v1_api import BatchV1Api
from kubernetes.client.api.certificates_api import CertificatesApi
from kubernetes.client.api.certificates_v1_api import CertificatesV1Api
from kubernetes.client.api.certificates_v1alpha1_api import CertificatesV1alpha1Api
from kubernetes.client.api.coordination_api import CoordinationApi
from kubernetes.client.api.coordination_v1_api import CoordinationV1Api
from kubernetes.client.api.coordination_v1alpha2_api import CoordinationV1alpha2Api
from kubernetes.client.api.core_api import CoreApi
from kubernetes.client.api.core_v1_api import CoreV1Api
from kubernetes.client.api.custom_objects_api import CustomObjectsApi
from kubernetes.client.api.discovery_api import DiscoveryApi
from kubernetes.client.api.discovery_v1_api import DiscoveryV1Api
from kubernetes.client.api.events_api import EventsApi
from kubernetes.client.api.events_v1_api import EventsV1Api
from kubernetes.client.api.flowcontrol_apiserver_api import FlowcontrolApiserverApi
from kubernetes.client.api.flowcontrol_apiserver_v1_api import FlowcontrolApiserverV1Api
from kubernetes.client.api.internal_apiserver_api import InternalApiserverApi
from kubernetes.client.api.internal_apiserver_v1alpha1_api import InternalApiserverV1alpha1Api
from kubernetes.client.api.logs_api import LogsApi
from kubernetes.client.api.networking_api import NetworkingApi
from kubernetes.client.api.networking_v1_api import NetworkingV1Api
from kubernetes.client.api.networking_v1beta1_api import NetworkingV1beta1Api
from kubernetes.client.api.node_api import NodeApi
from kubernetes.client.api.node_v1_api import NodeV1Api
from kubernetes.client.api.openid_api import OpenidApi
from kubernetes.client.api.policy_api import PolicyApi
from kubernetes.client.api.policy_v1_api import PolicyV1Api
from kubernetes.client.api.rbac_authorization_api import RbacAuthorizationApi
from kubernetes.client.api.rbac_authorization_v1_api import RbacAuthorizationV1Api
from kubernetes.client.api.resource_api import ResourceApi
from kubernetes.client.api.resource_v1alpha3_api import ResourceV1alpha3Api
from kubernetes.client.api.resource_v1beta1_api import ResourceV1beta1Api
from kubernetes.client.api.scheduling_api import SchedulingApi
from kubernetes.client.api.scheduling_v1_api import SchedulingV1Api
from kubernetes.client.api.storage_api import StorageApi
from kubernetes.client.api.storage_v1_api import StorageV1Api
from kubernetes.client.api.storage_v1alpha1_api import StorageV1alpha1Api
from kubernetes.client.api.storage_v1beta1_api import StorageV1beta1Api
from kubernetes.client.api.storagemigration_api import StoragemigrationApi
from kubernetes.client.api.storagemigration_v1alpha1_api import StoragemigrationV1alpha1Api
from kubernetes.client.api.version_api import VersionApi
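Every API class listed here wraps the same ApiClient. A minimal usage sketch, assuming the vendored package keeps the upstream Configuration/ApiClient interface; the host, CA path and token below are placeholders, not values from this repository:

from kubernetes.client.configuration import Configuration
from kubernetes.client.api_client import ApiClient
from kubernetes.client.api.core_v1_api import CoreV1Api

cfg = Configuration()
cfg.host = "https://kubernetes.default.svc"          # placeholder API server URL
cfg.ssl_ca_cert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"  # placeholder CA bundle
cfg.api_key = {"authorization": "Bearer <token>"}    # placeholder bearer token

with ApiClient(cfg) as api_client:
    core_v1 = CoreV1Api(api_client)
    for pod in core_v1.list_pod_for_all_namespaces(limit=5).items:
        print(pod.metadata.namespace, pod.metadata.name)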
142
kubernetes/client/api/admissionregistration_api.py
Normal file
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class AdmissionregistrationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/admissionregistration.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
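The docstrings above describe the calling convention shared by all generated methods: synchronous by default, async_req=True for a thread-style handle, and _preload_content=False for the raw HTTP response. A hedged sketch of the three styles, reusing the hypothetical api_client built in the earlier Configuration sketch:

from kubernetes.client.api.admissionregistration_api import AdmissionregistrationApi

api = AdmissionregistrationApi(api_client)  # api_client: hypothetical, as sketched above

# Synchronous: returns a deserialized V1APIGroup.
group = api.get_api_group()
print(group.name, [v.version for v in group.versions])

# Asynchronous: returns a thread-like handle; .get() blocks for the result.
thread = api.get_api_group(async_req=True)
group = thread.get()

# Raw response: skip deserialization and inspect the HTTP response yourself.
raw = api.get_api_group(_preload_content=False)
print(raw.status, raw.data[:120])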
4704
kubernetes/client/api/admissionregistration_v1_api.py
Normal file
File diff suppressed because it is too large

2216
kubernetes/client/api/admissionregistration_v1alpha1_api.py
Normal file
File diff suppressed because it is too large

2630
kubernetes/client/api/admissionregistration_v1beta1_api.py
Normal file
File diff suppressed because it is too large

142
kubernetes/client/api/apiextensions_api.py
Normal file
@@ -0,0 +1,142 @@
# coding: utf-8
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class ApiextensionsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiextensions.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
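The `get_api_group` pair above is the discovery entry point for the `apiextensions.k8s.io` group. A minimal usage sketch of the vendored client, not part of this commit: it assumes the copied tree also ships the usual `kubernetes.config` helpers and that the package is importable as `kubernetes`, as the Dockerfile suggests.

    # Hypothetical usage sketch; the config-loading helpers are an assumption, not shown in this diff.
    from kubernetes import client, config

    config.load_incluster_config()          # or config.load_kube_config() outside the cluster
    api = client.ApiextensionsApi()

    group = api.get_api_group()             # synchronous call, returns a V1APIGroup
    print(group.preferred_version.version)

    thread = api.get_api_group(async_req=True)   # async variant returns a thread-like object
    group_async = thread.get()                   # block until the result is available

The `async_req=True` form matches the docstring above: the call returns immediately and `get()` waits for the deserialized result.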
1593  kubernetes/client/api/apiextensions_v1_api.py  (new file; diff suppressed because it is too large)
142  kubernetes/client/api/apiregistration_api.py  (new file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class ApiregistrationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apiregistration.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
1593  kubernetes/client/api/apiregistration_v1_api.py  (new file; diff suppressed because it is too large)
142  kubernetes/client/api/apis_api.py  (new file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class ApisApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_versions(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroupList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_versions_with_http_info(**kwargs)  # noqa: E501

    def get_api_versions_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroupList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_versions" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroupList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
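`ApisApi.get_api_versions` drives discovery across the whole `/apis/` tree and returns a `V1APIGroupList`. A hedged sketch of how that list might be walked; it assumes the same `kubernetes.config` helpers as the earlier example.

    # Hypothetical discovery sketch built on the generated ApisApi class above.
    from kubernetes import client, config

    config.load_kube_config()                   # assumption: vendored config helpers are available
    apis = client.ApisApi()

    group_list = apis.get_api_versions()        # V1APIGroupList from GET /apis/
    for group in group_list.groups:
        print(group.name, group.preferred_version.group_version)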
142  kubernetes/client/api/apps_api.py  (new file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class AppsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/apps/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
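All of these group-level classes share the same generated shape; only the class name and request path change. The request options documented in every docstring behave identically, so one hedged sketch covers them all (`api` stands for any instance from the earlier examples, e.g. `client.AppsApi()`):

    # Hypothetical sketch of the shared request options documented above.
    raw = api.get_api_group(_preload_content=False)       # raw urllib3.HTTPResponse, body not deserialized
    print(raw.status, raw.data[:80])

    group = api.get_api_group(_request_timeout=(3, 10))   # (connection, read) timeouts in seconds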
9529  kubernetes/client/api/apps_v1_api.py  (new file; diff suppressed because it is too large)
142  kubernetes/client/api/authentication_api.py  (new file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class AuthenticationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
410  kubernetes/client/api/authentication_v1_api.py  (new file)
@@ -0,0 +1,410 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class AuthenticationV1Api(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def create_self_subject_review(self, body, **kwargs):  # noqa: E501
        """create_self_subject_review  # noqa: E501

        create a SelfSubjectReview  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_self_subject_review(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1SelfSubjectReview body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1SelfSubjectReview
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.create_self_subject_review_with_http_info(body, **kwargs)  # noqa: E501

    def create_self_subject_review_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_self_subject_review  # noqa: E501

        create a SelfSubjectReview  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_self_subject_review_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1SelfSubjectReview body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1SelfSubjectReview, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'body',
            'dry_run',
            'field_manager',
            'field_validation',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_self_subject_review" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_review`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/v1/selfsubjectreviews', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1SelfSubjectReview',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def create_token_review(self, body, **kwargs):  # noqa: E501
        """create_token_review  # noqa: E501

        create a TokenReview  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_token_review(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1TokenReview body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1TokenReview
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.create_token_review_with_http_info(body, **kwargs)  # noqa: E501

    def create_token_review_with_http_info(self, body, **kwargs):  # noqa: E501
        """create_token_review  # noqa: E501

        create a TokenReview  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_token_review_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param V1TokenReview body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1TokenReview, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'body',
            'dry_run',
            'field_manager',
            'field_validation',
            'pretty'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_token_review" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                        local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_token_review`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
            query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
        if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
            query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501
        if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None:  # noqa: E501
            query_params.append(('fieldValidation', local_var_params['field_validation']))  # noqa: E501
        if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
            query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/v1/tokenreviews', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1TokenReview',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_api_resources(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_resources_with_http_info(**kwargs)  # noqa: E501

    def get_api_resources_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_resources  # noqa: E501

        get available resources  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/v1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
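`AuthenticationV1Api` adds the two write-style calls of this group: `create_token_review` and `create_self_subject_review`. A hedged sketch follows; the token string is a placeholder, and the model classes (`V1TokenReview`, `V1SelfSubjectReview`, and so on) are assumed to be exported from the vendored `kubernetes.client` package as they are upstream.

    # Hypothetical sketch; the token value is a placeholder, not taken from this commit.
    from kubernetes import client

    auth_v1 = client.AuthenticationV1Api()

    review = client.V1TokenReview(
        spec=client.V1TokenReviewSpec(token="<service-account-token>")
    )
    result = auth_v1.create_token_review(review)
    print(result.status.authenticated, result.status.user)

    # SelfSubjectReview echoes back the attributes of the calling identity ("who am I?").
    me = auth_v1.create_self_subject_review(client.V1SelfSubjectReview())
    print(me.status.user_info)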
|
276
kubernetes/client/api/authentication_v1beta1_api.py
Normal file
276
kubernetes/client/api/authentication_v1beta1_api.py
Normal file
|
@ -0,0 +1,276 @@
|
|||
# coding: utf-8
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re # noqa: F401
|
||||
|
||||
# python 2 and python 3 compatibility library
|
||||
import six
|
||||
|
||||
from kubernetes.client.api_client import ApiClient
|
||||
from kubernetes.client.exceptions import ( # noqa: F401
|
||||
ApiTypeError,
|
||||
ApiValueError
|
||||
)
|
||||
|
||||
|
||||
class AuthenticationV1beta1Api(object):
|
||||
"""NOTE: This class is auto generated by OpenAPI Generator
|
||||
Ref: https://openapi-generator.tech
|
||||
|
||||
Do not edit the class manually.
|
||||
"""
|
||||
|
||||
def __init__(self, api_client=None):
|
||||
if api_client is None:
|
||||
api_client = ApiClient()
|
||||
self.api_client = api_client
|
||||
|
||||
def create_self_subject_review(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_review # noqa: E501
|
||||
|
||||
create a SelfSubjectReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_review(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1beta1SelfSubjectReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1beta1SelfSubjectReview
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.create_self_subject_review_with_http_info(body, **kwargs) # noqa: E501
|
||||
|
||||
def create_self_subject_review_with_http_info(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_review # noqa: E501
|
||||
|
||||
create a SelfSubjectReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_review_with_http_info(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1beta1SelfSubjectReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1beta1SelfSubjectReview, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
'body',
|
||||
'dry_run',
|
||||
'field_manager',
|
||||
'field_validation',
|
||||
'pretty'
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method create_self_subject_review" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
# verify the required parameter 'body' is set
|
||||
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
|
||||
local_var_params['body'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_review`") # noqa: E501
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
|
||||
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
|
||||
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
|
||||
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
|
||||
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
|
||||
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
|
||||
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
|
||||
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
|
||||
|
||||
        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/v1beta1/selfsubjectreviews', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1beta1SelfSubjectReview', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_api_resources(self, **kwargs): # noqa: E501
        """get_api_resources # noqa: E501

        get available resources # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIResourceList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_resources_with_http_info(**kwargs) # noqa: E501

    def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
        """get_api_resources # noqa: E501

        get available resources # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_resources_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_resources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/authentication.k8s.io/v1beta1/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIResourceList', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
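
The SelfSubjectReview and discovery endpoints above are easiest to read alongside a short usage sketch. This is illustrative only: it assumes the vendored package is importable as kubernetes, that credentials are already loaded (for example via the pod's service account), and that the create method is named create_self_subject_review as in the upstream client.

from kubernetes import client, config

config.load_incluster_config()  # or config.load_kube_config() outside the cluster
auth_v1beta1 = client.AuthenticationV1beta1Api()

# "Who am I?" - the request body is an empty SelfSubjectReview; the server fills status.user_info.
review = auth_v1beta1.create_self_subject_review(client.V1beta1SelfSubjectReview())
print(review.status.user_info.username, review.status.user_info.groups)

# Discover which resources authentication.k8s.io/v1beta1 serves.
resource_list = auth_v1beta1.get_api_resources()
print([r.name for r in resource_list.resources])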

142
kubernetes/client/api/authorization_api.py
Normal file
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
    ApiTypeError,
    ApiValueError
)


class AuthorizationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs): # noqa: E501
        """get_api_group # noqa: E501

        get information of a group # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs) # noqa: E501

    def get_api_group_with_http_info(self, **kwargs): # noqa: E501
        """get_api_group # noqa: E501

        get information of a group # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken'] # noqa: E501

        return self.api_client.call_api(
            '/apis/authorization.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
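
AuthorizationApi above only exposes group discovery; a minimal sketch of what get_api_group returns (credentials assumed to be configured already):

from kubernetes import client

group = client.AuthorizationApi().get_api_group()
# V1APIGroup carries the group name plus its served and preferred versions.
print(group.name)                                 # authorization.k8s.io
print([v.group_version for v in group.versions])
print(group.preferred_version.group_version)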

687
kubernetes/client/api/authorization_v1_api.py
Normal file
@@ -0,0 +1,687 @@
# coding: utf-8
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re # noqa: F401
|
||||
|
||||
# python 2 and python 3 compatibility library
|
||||
import six
|
||||
|
||||
from kubernetes.client.api_client import ApiClient
|
||||
from kubernetes.client.exceptions import ( # noqa: F401
|
||||
ApiTypeError,
|
||||
ApiValueError
|
||||
)
|
||||
|
||||
|
||||
class AuthorizationV1Api(object):
|
||||
"""NOTE: This class is auto generated by OpenAPI Generator
|
||||
Ref: https://openapi-generator.tech
|
||||
|
||||
Do not edit the class manually.
|
||||
"""
|
||||
|
||||
def __init__(self, api_client=None):
|
||||
if api_client is None:
|
||||
api_client = ApiClient()
|
||||
self.api_client = api_client
|
||||
|
||||
def create_namespaced_local_subject_access_review(self, namespace, body, **kwargs): # noqa: E501
|
||||
"""create_namespaced_local_subject_access_review # noqa: E501
|
||||
|
||||
create a LocalSubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_namespaced_local_subject_access_review(namespace, body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param str namespace: object name and auth scope, such as for teams and projects (required)
|
||||
:param V1LocalSubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1LocalSubjectAccessReview
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.create_namespaced_local_subject_access_review_with_http_info(namespace, body, **kwargs) # noqa: E501
|
||||
|
||||
def create_namespaced_local_subject_access_review_with_http_info(self, namespace, body, **kwargs): # noqa: E501
|
||||
"""create_namespaced_local_subject_access_review # noqa: E501
|
||||
|
||||
create a LocalSubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_namespaced_local_subject_access_review_with_http_info(namespace, body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param str namespace: object name and auth scope, such as for teams and projects (required)
|
||||
:param V1LocalSubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1LocalSubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
'namespace',
|
||||
'body',
|
||||
'dry_run',
|
||||
'field_manager',
|
||||
'field_validation',
|
||||
'pretty'
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method create_namespaced_local_subject_access_review" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
# verify the required parameter 'namespace' is set
|
||||
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
|
||||
local_var_params['namespace'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_local_subject_access_review`") # noqa: E501
|
||||
# verify the required parameter 'body' is set
|
||||
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
|
||||
local_var_params['body'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_local_subject_access_review`") # noqa: E501
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
if 'namespace' in local_var_params:
|
||||
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
|
||||
|
||||
query_params = []
|
||||
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
|
||||
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
|
||||
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
|
||||
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
|
||||
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
|
||||
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
|
||||
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
|
||||
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
if 'body' in local_var_params:
|
||||
body_params = local_var_params['body']
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/authorization.k8s.io/v1/namespaces/{namespace}/localsubjectaccessreviews', 'POST',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1LocalSubjectAccessReview', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
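
A hedged sketch of the LocalSubjectAccessReview call above, which asks whether some other subject may act inside one namespace (the user, namespace and resource below are made up):

from kubernetes import client

authz_v1 = client.AuthorizationV1Api()
spec = client.V1SubjectAccessReviewSpec(
    user="jane@example.com",  # hypothetical subject being checked
    resource_attributes=client.V1ResourceAttributes(
        namespace="team-a", verb="create", group="apps", resource="deployments"))
review = authz_v1.create_namespaced_local_subject_access_review(
    "team-a", client.V1LocalSubjectAccessReview(spec=spec))
print(review.status.allowed, review.status.reason)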
|
||||
|
||||
def create_self_subject_access_review(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_access_review # noqa: E501
|
||||
|
||||
create a SelfSubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_access_review(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SelfSubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1SelfSubjectAccessReview
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.create_self_subject_access_review_with_http_info(body, **kwargs) # noqa: E501
|
||||
|
||||
def create_self_subject_access_review_with_http_info(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_access_review # noqa: E501
|
||||
|
||||
create a SelfSubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_access_review_with_http_info(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SelfSubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1SelfSubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
'body',
|
||||
'dry_run',
|
||||
'field_manager',
|
||||
'field_validation',
|
||||
'pretty'
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method create_self_subject_access_review" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
# verify the required parameter 'body' is set
|
||||
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
|
||||
local_var_params['body'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_access_review`") # noqa: E501
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
|
||||
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
|
||||
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
|
||||
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
|
||||
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
|
||||
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
|
||||
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
|
||||
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
if 'body' in local_var_params:
|
||||
body_params = local_var_params['body']
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/authorization.k8s.io/v1/selfsubjectaccessreviews', 'POST',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1SelfSubjectAccessReview', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
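
SelfSubjectAccessReview is the API behind kubectl auth can-i; a minimal sketch of the method above, checking the caller's own permissions (resource attributes are illustrative):

from kubernetes import client

review = client.AuthorizationV1Api().create_self_subject_access_review(
    client.V1SelfSubjectAccessReview(
        spec=client.V1SelfSubjectAccessReviewSpec(
            resource_attributes=client.V1ResourceAttributes(
                namespace="default", verb="list", resource="pods"))))
print("allowed:", review.status.allowed)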
|
||||
|
||||
def create_self_subject_rules_review(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_rules_review # noqa: E501
|
||||
|
||||
create a SelfSubjectRulesReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_rules_review(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SelfSubjectRulesReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1SelfSubjectRulesReview
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.create_self_subject_rules_review_with_http_info(body, **kwargs) # noqa: E501
|
||||
|
||||
def create_self_subject_rules_review_with_http_info(self, body, **kwargs): # noqa: E501
|
||||
"""create_self_subject_rules_review # noqa: E501
|
||||
|
||||
create a SelfSubjectRulesReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_self_subject_rules_review_with_http_info(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SelfSubjectRulesReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1SelfSubjectRulesReview, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
'body',
|
||||
'dry_run',
|
||||
'field_manager',
|
||||
'field_validation',
|
||||
'pretty'
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method create_self_subject_rules_review" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
# verify the required parameter 'body' is set
|
||||
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
|
||||
local_var_params['body'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `body` when calling `create_self_subject_rules_review`") # noqa: E501
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
|
||||
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
|
||||
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
|
||||
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
|
||||
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
|
||||
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
|
||||
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
|
||||
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
if 'body' in local_var_params:
|
||||
body_params = local_var_params['body']
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/authorization.k8s.io/v1/selfsubjectrulesreviews', 'POST',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1SelfSubjectRulesReview', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
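
The SelfSubjectRulesReview endpoint above enumerates everything the caller may do within a single namespace; a short illustrative sketch (the namespace is an assumption):

from kubernetes import client

rules = client.AuthorizationV1Api().create_self_subject_rules_review(
    client.V1SelfSubjectRulesReview(
        spec=client.V1SelfSubjectRulesReviewSpec(namespace="default")))
for rule in rules.status.resource_rules:
    print(rule.verbs, rule.api_groups, rule.resources)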
|
||||
|
||||
def create_subject_access_review(self, body, **kwargs): # noqa: E501
|
||||
"""create_subject_access_review # noqa: E501
|
||||
|
||||
create a SubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_subject_access_review(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1SubjectAccessReview
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.create_subject_access_review_with_http_info(body, **kwargs) # noqa: E501
|
||||
|
||||
def create_subject_access_review_with_http_info(self, body, **kwargs): # noqa: E501
|
||||
"""create_subject_access_review # noqa: E501
|
||||
|
||||
create a SubjectAccessReview # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.create_subject_access_review_with_http_info(body, async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param V1SubjectAccessReview body: (required)
|
||||
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
:param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
:param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1SubjectAccessReview, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
'body',
|
||||
'dry_run',
|
||||
'field_manager',
|
||||
'field_validation',
|
||||
'pretty'
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method create_subject_access_review" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
# verify the required parameter 'body' is set
|
||||
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
|
||||
local_var_params['body'] is None): # noqa: E501
|
||||
raise ApiValueError("Missing the required parameter `body` when calling `create_subject_access_review`") # noqa: E501
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
|
||||
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
|
||||
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
|
||||
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
|
||||
if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501
|
||||
query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501
|
||||
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
|
||||
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
if 'body' in local_var_params:
|
||||
body_params = local_var_params['body']
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/authorization.k8s.io/v1/subjectaccessreviews', 'POST',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1SubjectAccessReview', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
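
SubjectAccessReview is the cluster-scoped variant above, typically used by webhooks or gateways that must authorize an arbitrary subject; a hedged sketch with made-up identities:

from kubernetes import client

review = client.AuthorizationV1Api().create_subject_access_review(
    client.V1SubjectAccessReview(
        spec=client.V1SubjectAccessReviewSpec(
            user="system:serviceaccount:ops:deployer",  # hypothetical subject
            groups=["system:serviceaccounts"],
            resource_attributes=client.V1ResourceAttributes(
                namespace="prod", verb="update", group="apps", resource="deployments"))))
print(review.status.allowed, review.status.denied)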
|
||||
|
||||
def get_api_resources(self, **kwargs): # noqa: E501
|
||||
"""get_api_resources # noqa: E501
|
||||
|
||||
get available resources # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_resources(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1APIResourceList
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
|
||||
|
||||
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
|
||||
"""get_api_resources # noqa: E501
|
||||
|
||||
get available resources # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_resources_with_http_info(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method get_api_resources" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/authorization.k8s.io/v1/', 'GET',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1APIResourceList', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
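
Every generated method in these classes repeats the same async and transport switches described in the docstrings; a small sketch of that shared contract (timeout values are arbitrary):

from kubernetes import client

authz_v1 = client.AuthorizationV1Api()

# async_req=True returns a thread-pool handle; block on .get() for the deserialized result.
pending = authz_v1.get_api_resources(async_req=True)
resource_list = pending.get()

# _preload_content=False returns the raw HTTP response object; the tuple is a (connect, read) timeout.
raw = authz_v1.get_api_resources(_preload_content=False, _request_timeout=(3, 10))
print(raw.status, len(raw.data))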

142
kubernetes/client/api/autoscaling_api.py
Normal file
@@ -0,0 +1,142 @@
# coding: utf-8
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re # noqa: F401
|
||||
|
||||
# python 2 and python 3 compatibility library
|
||||
import six
|
||||
|
||||
from kubernetes.client.api_client import ApiClient
|
||||
from kubernetes.client.exceptions import ( # noqa: F401
|
||||
ApiTypeError,
|
||||
ApiValueError
|
||||
)
|
||||
|
||||
|
||||
class AutoscalingApi(object):
|
||||
"""NOTE: This class is auto generated by OpenAPI Generator
|
||||
Ref: https://openapi-generator.tech
|
||||
|
||||
Do not edit the class manually.
|
||||
"""
|
||||
|
||||
def __init__(self, api_client=None):
|
||||
if api_client is None:
|
||||
api_client = ApiClient()
|
||||
self.api_client = api_client
|
||||
|
||||
def get_api_group(self, **kwargs): # noqa: E501
|
||||
"""get_api_group # noqa: E501
|
||||
|
||||
get information of a group # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_group(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1APIGroup
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
|
||||
|
||||
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
|
||||
"""get_api_group # noqa: E501
|
||||
|
||||
get information of a group # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_group_with_http_info(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method get_api_group" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/autoscaling/', 'GET',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1APIGroup', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
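
The autoscaling group discovery above is mainly useful for picking a served HPA version before instantiating the matching versioned API class; a hedged sketch (it assumes the AutoscalingV1Api/AutoscalingV2Api classes listed just below in this diff):

from kubernetes import client

group = client.AutoscalingApi().get_api_group()
versions = {v.version for v in group.versions}
hpa_api = client.AutoscalingV2Api() if "v2" in versions else client.AutoscalingV1Api()
hpas = hpa_api.list_horizontal_pod_autoscaler_for_all_namespaces()
print(len(hpas.items))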

1843
kubernetes/client/api/autoscaling_v1_api.py
Normal file
File diff suppressed because it is too large

1843
kubernetes/client/api/autoscaling_v2_api.py
Normal file
File diff suppressed because it is too large

142
kubernetes/client/api/batch_api.py
Normal file
@@ -0,0 +1,142 @@
# coding: utf-8
|
||||
|
||||
"""
|
||||
Kubernetes
|
||||
|
||||
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
|
||||
|
||||
The version of the OpenAPI document: release-1.32
|
||||
Generated by: https://openapi-generator.tech
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import re # noqa: F401
|
||||
|
||||
# python 2 and python 3 compatibility library
|
||||
import six
|
||||
|
||||
from kubernetes.client.api_client import ApiClient
|
||||
from kubernetes.client.exceptions import ( # noqa: F401
|
||||
ApiTypeError,
|
||||
ApiValueError
|
||||
)
|
||||
|
||||
|
||||
class BatchApi(object):
|
||||
"""NOTE: This class is auto generated by OpenAPI Generator
|
||||
Ref: https://openapi-generator.tech
|
||||
|
||||
Do not edit the class manually.
|
||||
"""
|
||||
|
||||
def __init__(self, api_client=None):
|
||||
if api_client is None:
|
||||
api_client = ApiClient()
|
||||
self.api_client = api_client
|
||||
|
||||
def get_api_group(self, **kwargs): # noqa: E501
|
||||
"""get_api_group # noqa: E501
|
||||
|
||||
get information of a group # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_group(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: V1APIGroup
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
kwargs['_return_http_data_only'] = True
|
||||
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
|
||||
|
||||
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
|
||||
"""get_api_group # noqa: E501
|
||||
|
||||
get information of a group # noqa: E501
|
||||
This method makes a synchronous HTTP request by default. To make an
|
||||
asynchronous HTTP request, please pass async_req=True
|
||||
>>> thread = api.get_api_group_with_http_info(async_req=True)
|
||||
>>> result = thread.get()
|
||||
|
||||
:param async_req bool: execute request asynchronously
|
||||
:param _return_http_data_only: response data without head status code
|
||||
and headers
|
||||
:param _preload_content: if False, the urllib3.HTTPResponse object will
|
||||
be returned without reading/decoding response
|
||||
data. Default is True.
|
||||
:param _request_timeout: timeout setting for this request. If one
|
||||
number provided, it will be total request
|
||||
timeout. It can also be a pair (tuple) of
|
||||
(connection, read) timeouts.
|
||||
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
|
||||
If the method is called asynchronously,
|
||||
returns the request thread.
|
||||
"""
|
||||
|
||||
local_var_params = locals()
|
||||
|
||||
all_params = [
|
||||
]
|
||||
all_params.extend(
|
||||
[
|
||||
'async_req',
|
||||
'_return_http_data_only',
|
||||
'_preload_content',
|
||||
'_request_timeout'
|
||||
]
|
||||
)
|
||||
|
||||
for key, val in six.iteritems(local_var_params['kwargs']):
|
||||
if key not in all_params:
|
||||
raise ApiTypeError(
|
||||
"Got an unexpected keyword argument '%s'"
|
||||
" to method get_api_group" % key
|
||||
)
|
||||
local_var_params[key] = val
|
||||
del local_var_params['kwargs']
|
||||
|
||||
collection_formats = {}
|
||||
|
||||
path_params = {}
|
||||
|
||||
query_params = []
|
||||
|
||||
header_params = {}
|
||||
|
||||
form_params = []
|
||||
local_var_files = {}
|
||||
|
||||
body_params = None
|
||||
# HTTP header `Accept`
|
||||
header_params['Accept'] = self.api_client.select_header_accept(
|
||||
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
|
||||
|
||||
# Authentication setting
|
||||
auth_settings = ['BearerToken'] # noqa: E501
|
||||
|
||||
return self.api_client.call_api(
|
||||
'/apis/batch/', 'GET',
|
||||
path_params,
|
||||
query_params,
|
||||
header_params,
|
||||
body=body_params,
|
||||
post_params=form_params,
|
||||
files=local_var_files,
|
||||
response_type='V1APIGroup', # noqa: E501
|
||||
auth_settings=auth_settings,
|
||||
async_req=local_var_params.get('async_req'),
|
||||
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
|
||||
_preload_content=local_var_params.get('_preload_content', True),
|
||||
_request_timeout=local_var_params.get('_request_timeout'),
|
||||
collection_formats=collection_formats)
|
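Editor's note: the vendored discovery-only classes like BatchApi above are used exactly like their pip-installed counterparts. A minimal usage sketch (not part of this commit), assuming the vendored package also ships the usual config helpers and that credentials are available in-cluster:

from kubernetes import client, config

# Assumption: running inside a pod; config.load_kube_config() works outside the cluster.
config.load_incluster_config()

batch = client.BatchApi()
group = batch.get_api_group()  # GET /apis/batch/
print(group.name, group.preferred_version.group_version)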
3544  kubernetes/client/api/batch_v1_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/certificates_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class CertificatesApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/certificates.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
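Editor's note: the _preload_content flag documented in these generated methods returns the raw HTTP response instead of a deserialized model. A hedged sketch (not part of this commit), assuming kubeconfig or in-cluster config has already been loaded as in the earlier example:

from kubernetes import client

certs = client.CertificatesApi()
resp = certs.get_api_group(_preload_content=False)  # raw response, no model deserialization
print(resp.status)
print(resp.data.decode("utf-8"))  # raw JSON served at /apis/certificates.k8s.io/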
2007  kubernetes/client/api/certificates_v1_api.py  (Normal file; diff suppressed because it is too large)
1179  kubernetes/client/api/certificates_v1alpha1_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/coordination_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class CoordinationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/coordination.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
1402  kubernetes/client/api/coordination_v1_api.py  (Normal file; diff suppressed because it is too large)
1402  kubernetes/client/api/coordination_v1alpha2_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/core_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class CoreApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_versions(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIVersions
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_versions_with_http_info(**kwargs)  # noqa: E501

    def get_api_versions_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_versions  # noqa: E501

        get available API versions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_versions_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIVersions, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_versions" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/api/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIVersions',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
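Editor's note: CoreApi is the one class in this batch whose discovery call differs; it lists the versions served under /api/ rather than a named group. A small sketch (not part of this commit), under the same configuration assumptions as the earlier example:

from kubernetes import client

core = client.CoreApi()
versions = core.get_api_versions()  # GET /api/, returns a V1APIVersions object
print(versions.versions)            # typically ['v1']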
30454  kubernetes/client/api/core_v1_api.py  (Normal file; diff suppressed because it is too large)
4696  kubernetes/client/api/custom_objects_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/discovery_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class DiscoveryApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/discovery.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
1402  kubernetes/client/api/discovery_v1_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/events_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class EventsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/events.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
1402  kubernetes/client/api/events_v1_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/flowcontrol_apiserver_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class FlowcontrolApiserverApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/flowcontrol.apiserver.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
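Editor's note: every generated method above also accepts async_req=True and then hands back a pool result object whose get() blocks for the response, as the docstrings describe. A hedged sketch of that pattern (not part of this commit):

from kubernetes import client

fc = client.FlowcontrolApiserverApi()
thread = fc.get_api_group(async_req=True)  # returns immediately with a pool result
# ... do other work while the request is in flight ...
group = thread.get(timeout=10)             # blocks until the response arrives
print(group.preferred_version.version)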
3044  kubernetes/client/api/flowcontrol_apiserver_v1_api.py  (Normal file; diff suppressed because it is too large)
142  kubernetes/client/api/internal_apiserver_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class InternalApiserverApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/internal.apiserver.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
1593  kubernetes/client/api/internal_apiserver_v1alpha1_api.py  (Normal file; diff suppressed because it is too large)
244  kubernetes/client/api/logs_api.py  (Normal file)
@@ -0,0 +1,244 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class LogsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def log_file_handler(self, logpath, **kwargs):  # noqa: E501
        """log_file_handler  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.log_file_handler(logpath, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str logpath: path to the log (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.log_file_handler_with_http_info(logpath, **kwargs)  # noqa: E501

    def log_file_handler_with_http_info(self, logpath, **kwargs):  # noqa: E501
        """log_file_handler  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.log_file_handler_with_http_info(logpath, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str logpath: path to the log (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
            'logpath'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method log_file_handler" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'logpath' is set
        if self.api_client.client_side_validation and ('logpath' not in local_var_params or  # noqa: E501
                                                        local_var_params['logpath'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `logpath` when calling `log_file_handler`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'logpath' in local_var_params:
            path_params['logpath'] = local_var_params['logpath']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/logs/{logpath}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def log_file_list_handler(self, **kwargs):  # noqa: E501
        """log_file_list_handler  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.log_file_list_handler(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.log_file_list_handler_with_http_info(**kwargs)  # noqa: E501

    def log_file_list_handler_with_http_info(self, **kwargs):  # noqa: E501
        """log_file_list_handler  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.log_file_list_handler_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method log_file_list_handler" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/logs/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
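Editor's note: LogsApi proxies the apiserver's /logs/ file tree, so its handlers return no model and are usually called with _preload_content=False to read the raw text. A sketch (not part of this commit); the file name is purely hypothetical and depends on the node:

from kubernetes import client

logs = client.LogsApi()
listing = logs.log_file_list_handler(_preload_content=False)
print(listing.data.decode("utf-8"))  # directory listing under /logs/

one_file = logs.log_file_handler("kube-apiserver.log", _preload_content=False)
print(one_file.data.decode("utf-8")[:200])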
142  kubernetes/client/api/networking_api.py  (Normal file)
@@ -0,0 +1,142 @@
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.32
    Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six

from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import (  # noqa: F401
    ApiTypeError,
    ApiValueError
)


class NetworkingApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_api_group(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1APIGroup
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        return self.get_api_group_with_http_info(**kwargs)  # noqa: E501

    def get_api_group_with_http_info(self, **kwargs):  # noqa: E501
        """get_api_group  # noqa: E501

        get information of a group  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_api_group_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = [
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        return self.api_client.call_api(
            '/apis/networking.k8s.io/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1APIGroup',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
4140  kubernetes/client/api/networking_v1_api.py  (Normal file; diff suppressed because it is too large)
2630  kubernetes/client/api/networking_v1beta1_api.py  (Normal file; diff suppressed because it is too large)
Some files were not shown because too many files have changed in this diff.