fix(kubernetes): temporary solution for updated k8s python client
This commit is contained in:
parent
07d6fe7442
commit
9129813244
1478 changed files with 422354 additions and 2 deletions
49
kubernetes/base/config/__init__.py
Normal file
49
kubernetes/base/config/__init__.py
Normal file
|
@ -0,0 +1,49 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from os.path import exists, expanduser
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .incluster_config import load_incluster_config
|
||||
from .kube_config import (KUBE_CONFIG_DEFAULT_LOCATION,
|
||||
list_kube_config_contexts, load_kube_config,
|
||||
load_kube_config_from_dict, new_client_from_config, new_client_from_config_dict)
|
||||
|
||||
|
||||
def load_config(**kwargs):
    """Wrapper function to load the kube_config.

    It will initially try to load_kube_config from the provided path,
    then check if KUBE_CONFIG_DEFAULT_LOCATION exists.
    If neither exists, it will fall back to load_incluster_config
    and inform the user accordingly.

    :param kwargs: A combination of all possible kwargs that can be
        passed to either load_kube_config or load_incluster_config
        functions.
    """
    # Idiom fix: membership tests go directly against the dict, not
    # against .keys().
    if "config_file" in kwargs:
        load_kube_config(**kwargs)
    elif "kube_config_path" in kwargs:
        # Translate the alias to the parameter name load_kube_config
        # expects; the key is known to be present, so no pop() default
        # is needed.
        kwargs["config_file"] = kwargs.pop("kube_config_path")
        load_kube_config(**kwargs)
    elif exists(expanduser(KUBE_CONFIG_DEFAULT_LOCATION)):
        load_kube_config(**kwargs)
    else:
        # Best-effort fallback for pods: warn, then try in-cluster config.
        print(
            "kube_config_path not provided and "
            "default location ({0}) does not exist. "
            "Using inCluster Config. "
            "This might not work.".format(KUBE_CONFIG_DEFAULT_LOCATION))
        load_incluster_config(**kwargs)
|
17
kubernetes/base/config/config_exception.py
Normal file
17
kubernetes/base/config/config_exception.py
Normal file
|
@ -0,0 +1,17 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class ConfigException(Exception):
    """Raised when the kubernetes client configuration is missing or invalid."""
|
84
kubernetes/base/config/dateutil.py
Normal file
84
kubernetes/base/config/dateutil.py
Normal file
|
@ -0,0 +1,84 @@
|
|||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import math
|
||||
import re
|
||||
|
||||
|
||||
class TimezoneInfo(datetime.tzinfo):
    """Fixed-offset timezone.

    ``h`` and ``m`` are the hour and minute components of the UTC offset;
    the minutes inherit the sign of the hours (e.g. ``TimezoneInfo(-2, 30)``
    is UTC-02:30).
    """

    def __init__(self, h, m):
        self._name = "UTC"
        # Bug fix: the offset suffix must be appended whenever EITHER
        # component is non-zero (the previous `and` left e.g. UTC+02:00
        # named just "UTC"), and minutes must be zero-padded (%02d), not
        # space-padded (%2d).
        if h != 0 or m != 0:
            self._name += "%+03d:%02d" % (h, m)
        # copysign gives the minutes the same sign as the hours.
        self._delta = datetime.timedelta(hours=h, minutes=math.copysign(m, h))

    def utcoffset(self, dt):
        """Return the fixed offset from UTC."""
        return self._delta

    def tzname(self, dt):
        """Return the timezone name, e.g. "UTC" or "UTC+03:00"."""
        return self._name

    def dst(self, dt):
        """Fixed-offset zones have no daylight saving time."""
        return datetime.timedelta(0)


# Shared UTC singleton used throughout this module.
UTC = TimezoneInfo(0, 0)
|
||||
|
||||
# ref https://www.ietf.org/rfc/rfc3339.txt
# Compiled once at import time.  re.VERBOSE strips the inline `#` comments
# (and bare whitespace) from the pattern -- whitespace INSIDE the character
# classes `[ Tt]` / `[zZ ]` is preserved -- and re.IGNORECASE accepts
# lowercase 't'/'z' separators.
_re_rfc3339 = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)"  # full-date
                         r"[ Tt]"  # Separator
                         r"(\d\d):(\d\d):(\d\d)([.,]\d+)?"  # partial-time
                         r"([zZ ]|[-+]\d\d?:\d\d)?",  # time-offset
                         re.VERBOSE + re.IGNORECASE)
# Numeric offset forms such as "+05:30", "-08", "+0330": sign, hours,
# optional colon, optional minutes.
_re_timezone = re.compile(r"([-+])(\d\d?):?(\d\d)?")

# Microseconds per second; used to convert fractional seconds.
MICROSEC_PER_SEC = 1000000
|
||||
|
||||
|
||||
def parse_rfc3339(s):
    """Parse an RFC 3339 date/time string into a timezone-aware datetime.

    ``s`` may also already be a ``datetime.datetime``; in that case it is
    returned as-is, except that a missing tzinfo is filled in as UTC.
    """
    if isinstance(s, datetime.datetime):
        # Already parsed -- just guarantee it carries a timezone.
        return s if s.tzinfo else s.replace(tzinfo=UTC)

    groups = _re_rfc3339.search(s).groups()

    # Groups 0-5: year, month, day, hour, minute, second.
    year, month, day, hour, minute, second = (int(g) for g in groups[:6])

    # Group 6: optional fractional seconds, "." or "," separated.
    microseconds = 0
    if groups[6] is not None:
        fraction = float(groups[6].replace(",", "."))
        microseconds = int(MICROSEC_PER_SEC * fraction)

    # Group 7: optional offset; "Z"/"z" or absent means UTC.
    tz = UTC
    if groups[7] is not None and groups[7] != 'Z' and groups[7] != 'z':
        sign, off_hours, off_minutes = _re_timezone.search(groups[7]).groups()
        hour_offset = int(off_hours)
        if sign == "-":
            hour_offset *= -1
        minute_offset = int(off_minutes) if off_minutes else 0
        tz = TimezoneInfo(hour_offset, minute_offset)

    return datetime.datetime(
        year=year, month=month, day=day,
        hour=hour, minute=minute, second=second,
        microsecond=microseconds, tzinfo=tz)
|
||||
|
||||
|
||||
def format_rfc3339(date_time):
    """Render *date_time* as an RFC 3339 UTC timestamp (second precision).

    Naive datetimes are assumed to already be in UTC.
    """
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=UTC)
    # Normalize to UTC before formatting so the trailing "Z" is truthful.
    return date_time.astimezone(UTC).strftime('%Y-%m-%dT%H:%M:%SZ')
|
68
kubernetes/base/config/dateutil_test.py
Normal file
68
kubernetes/base/config/dateutil_test.py
Normal file
|
@ -0,0 +1,68 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from datetime import datetime
|
||||
|
||||
from .dateutil import UTC, TimezoneInfo, format_rfc3339, parse_rfc3339
|
||||
|
||||
|
||||
class DateUtilTest(unittest.TestCase):
    """Tests for parse_rfc3339 / format_rfc3339."""

    def _parse_rfc3339_test(self, st, y, m, d, h, mn, s, us):
        # Every accepted spelling must normalize to the same UTC instant.
        actual = parse_rfc3339(st)
        expected = datetime(y, m, d, h, mn, s, us, UTC)
        self.assertEqual(expected, actual)

    def test_parse_rfc3339(self):
        # (input string, expected UTC components) pairs covering the
        # separator, case, offset and fractional-second variants.
        cases = [
            ("2017-07-25T04:44:21Z", (2017, 7, 25, 4, 44, 21, 0)),
            ("2017-07-25 04:44:21Z", (2017, 7, 25, 4, 44, 21, 0)),
            ("2017-07-25T04:44:21", (2017, 7, 25, 4, 44, 21, 0)),
            ("2017-07-25T04:44:21z", (2017, 7, 25, 4, 44, 21, 0)),
            ("2017-07-25T04:44:21+03:00", (2017, 7, 25, 1, 44, 21, 0)),
            ("2017-07-25T04:44:21-03:00", (2017, 7, 25, 7, 44, 21, 0)),
            ("2017-07-25T04:44:21,005Z", (2017, 7, 25, 4, 44, 21, 5000)),
            ("2017-07-25T04:44:21.005Z", (2017, 7, 25, 4, 44, 21, 5000)),
            ("2017-07-25 04:44:21.0050Z", (2017, 7, 25, 4, 44, 21, 5000)),
            ("2017-07-25T04:44:21.5", (2017, 7, 25, 4, 44, 21, 500000)),
            ("2017-07-25T04:44:21.005z", (2017, 7, 25, 4, 44, 21, 5000)),
            ("2017-07-25T04:44:21.005+03:00", (2017, 7, 25, 1, 44, 21, 5000)),
            ("2017-07-25T04:44:21.005-03:00", (2017, 7, 25, 7, 44, 21, 5000)),
        ]
        for st, expected in cases:
            self._parse_rfc3339_test(st, *expected)

    def test_format_rfc3339(self):
        # Aware datetimes in any zone format as their UTC equivalent.
        self.assertEqual(
            format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0, UTC)),
            "2017-07-25T04:44:21Z")
        self.assertEqual(
            format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0,
                                    TimezoneInfo(2, 0))),
            "2017-07-25T02:44:21Z")
        self.assertEqual(
            format_rfc3339(datetime(2017, 7, 25, 4, 44, 21, 0,
                                    TimezoneInfo(-2, 30))),
            "2017-07-25T07:14:21Z")
|
107
kubernetes/base/config/exec_provider.py
Normal file
107
kubernetes/base/config/exec_provider.py
Normal file
|
@ -0,0 +1,107 @@
|
|||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from .config_exception import ConfigException
|
||||
|
||||
|
||||
class ExecProvider(object):
    """
    Implementation of the proposal for out-of-tree client
    authentication providers as described here --
    https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md

    Missing from implementation:

    * TLS cert support
    * caching
    """

    def __init__(self, exec_config, cwd, cluster=None):
        """
        exec_config must be of type ConfigNode because we depend on
        safe_get(self, key) to correctly handle optional exec provider
        config parameters.
        """
        # Both keys are mandatory per the exec plugin contract.
        for required in ('command', 'apiVersion'):
            if required not in exec_config:
                raise ConfigException(
                    'exec: malformed request. missing key \'%s\'' % required)
        self.api_version = exec_config['apiVersion']
        self.args = [exec_config['command']]
        if exec_config.safe_get('args'):
            self.args.extend(exec_config['args'])
        # Start from the parent environment, then overlay configured vars.
        self.env = os.environ.copy()
        if exec_config.safe_get('env'):
            self.env.update(
                {entry['name']: entry['value']
                 for entry in exec_config['env']})
        # Only forward cluster info when the kubeconfig opted in.
        self.cluster = (
            cluster if exec_config.safe_get('provideClusterInfo') else None)
        self.cwd = cwd or None

    def run(self, previous_response=None):
        """Invoke the credential plugin and return its 'status' payload.

        Raises ConfigException on non-zero exit, undecodable output,
        malformed responses, or an apiVersion mismatch.
        """
        interactive = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
        exec_info = {
            'apiVersion': self.api_version,
            'kind': 'ExecCredential',
            'spec': {
                'interactive': interactive
            }
        }
        if previous_response:
            exec_info['spec']['response'] = previous_response
        if self.cluster:
            exec_info['spec']['cluster'] = self.cluster

        self.env['KUBERNETES_EXEC_INFO'] = json.dumps(exec_info)
        # NOTE(review): shell=True combined with a list of args passes only
        # the first element as the command on POSIX -- presumably intentional
        # for this "temporary solution" (Windows?); confirm before changing.
        proc = subprocess.Popen(
            self.args,
            stdout=subprocess.PIPE,
            stderr=sys.stderr if interactive else subprocess.PIPE,
            stdin=sys.stdin if interactive else None,
            cwd=self.cwd,
            env=self.env,
            universal_newlines=True,
            shell=True)
        stdout, stderr = proc.communicate()
        exit_code = proc.wait()
        if exit_code != 0:
            message = 'exec: process returned %d' % exit_code
            stderr = stderr.strip()
            if stderr:
                message += '. %s' % stderr
            raise ConfigException(message)
        try:
            data = json.loads(stdout)
        except ValueError as de:
            raise ConfigException(
                'exec: failed to decode process output: %s' % de)
        for field in ('apiVersion', 'kind', 'status'):
            if field not in data:
                raise ConfigException(
                    'exec: malformed response. missing key \'%s\'' % field)
        if data['apiVersion'] != self.api_version:
            raise ConfigException(
                'exec: plugin api version %s does not match %s' %
                (data['apiVersion'], self.api_version))
        return data['status']
|
188
kubernetes/base/config/exec_provider_test.py
Normal file
188
kubernetes/base/config/exec_provider_test.py
Normal file
|
@ -0,0 +1,188 @@
|
|||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .exec_provider import ExecProvider
|
||||
from .kube_config import ConfigNode
|
||||
|
||||
|
||||
class ExecProviderTest(unittest.TestCase):
    """Unit tests for ExecProvider.

    subprocess.Popen is mocked in every test that reaches run(), so no
    real plugin binary is executed.
    """

    def setUp(self):
        # Minimal well-formed exec plugin configuration.
        self.input_ok = ConfigNode('test', {
            'command': 'aws-iam-authenticator',
            'args': ['token', '-i', 'dummy'],
            'apiVersion': 'client.authentication.k8s.io/v1beta1',
            'env': None
        })
        # Same configuration, additionally opting in to cluster info.
        self.input_with_cluster = ConfigNode('test', {
            'command': 'aws-iam-authenticator',
            'args': ['token', '-i', 'dummy'],
            'apiVersion': 'client.authentication.k8s.io/v1beta1',
            'provideClusterInfo': True,
            'env': None
        })
        # Canonical successful ExecCredential response from the plugin.
        self.output_ok = """
        {
            "apiVersion": "client.authentication.k8s.io/v1beta1",
            "kind": "ExecCredential",
            "status": {
                "token": "dummy"
            }
        }
        """

    def test_missing_input_keys(self):
        # 'command' and 'apiVersion' are both mandatory in the config.
        exec_configs = [ConfigNode('test1', {}),
                        ConfigNode('test2', {'command': ''}),
                        ConfigNode('test3', {'apiVersion': ''})]
        for exec_config in exec_configs:
            with self.assertRaises(ConfigException) as context:
                ExecProvider(exec_config, None)
            self.assertIn('exec: malformed request. missing key',
                          context.exception.args[0])

    @mock.patch('subprocess.Popen')
    def test_error_code_returned(self, mock):
        # A non-zero exit status must surface as a ConfigException.
        instance = mock.return_value
        instance.wait.return_value = 1
        instance.communicate.return_value = ('', '')
        with self.assertRaises(ConfigException) as context:
            ep = ExecProvider(self.input_ok, None)
            ep.run()
        self.assertIn('exec: process returned %d' %
                      instance.wait.return_value, context.exception.args[0])

    @mock.patch('subprocess.Popen')
    def test_nonjson_output_returned(self, mock):
        # Exit code 0 with unparsable stdout must also fail loudly.
        instance = mock.return_value
        instance.wait.return_value = 0
        instance.communicate.return_value = ('', '')
        with self.assertRaises(ConfigException) as context:
            ep = ExecProvider(self.input_ok, None)
            ep.run()
        self.assertIn('exec: failed to decode process output',
                      context.exception.args[0])

    @mock.patch('subprocess.Popen')
    def test_missing_output_keys(self, mock):
        # Responses missing 'apiVersion', 'kind', or 'status' are rejected.
        instance = mock.return_value
        instance.wait.return_value = 0
        outputs = [
            """
            {
                "kind": "ExecCredential",
                "status": {
                    "token": "dummy"
                }
            }
            """, """
            {
                "apiVersion": "client.authentication.k8s.io/v1beta1",
                "status": {
                    "token": "dummy"
                }
            }
            """, """
            {
                "apiVersion": "client.authentication.k8s.io/v1beta1",
                "kind": "ExecCredential"
            }
            """
        ]
        for output in outputs:
            instance.communicate.return_value = (output, '')
            with self.assertRaises(ConfigException) as context:
                ep = ExecProvider(self.input_ok, None)
                ep.run()
            self.assertIn('exec: malformed response. missing key',
                          context.exception.args[0])

    @mock.patch('subprocess.Popen')
    def test_mismatched_api_version(self, mock):
        # The plugin's apiVersion must match the configured one.
        instance = mock.return_value
        instance.wait.return_value = 0
        wrong_api_version = 'client.authentication.k8s.io/v1'
        output = """
        {
            "apiVersion": "%s",
            "kind": "ExecCredential",
            "status": {
                "token": "dummy"
            }
        }
        """ % wrong_api_version
        instance.communicate.return_value = (output, '')
        with self.assertRaises(ConfigException) as context:
            ep = ExecProvider(self.input_ok, None)
            ep.run()
        self.assertIn(
            'exec: plugin api version %s does not match' %
            wrong_api_version,
            context.exception.args[0])

    @mock.patch('subprocess.Popen')
    def test_ok_01(self, mock):
        # Happy path: run() returns the parsed 'status' dict.
        instance = mock.return_value
        instance.wait.return_value = 0
        instance.communicate.return_value = (self.output_ok, '')
        ep = ExecProvider(self.input_ok, None)
        result = ep.run()
        self.assertTrue(isinstance(result, dict))
        self.assertTrue('token' in result)

    @mock.patch('subprocess.Popen')
    def test_run_in_dir(self, mock):
        # The provided working directory is forwarded to Popen's cwd.
        instance = mock.return_value
        instance.wait.return_value = 0
        instance.communicate.return_value = (self.output_ok, '')
        ep = ExecProvider(self.input_ok, '/some/directory')
        ep.run()
        self.assertEqual(mock.call_args[1]['cwd'], '/some/directory')

    @mock.patch('subprocess.Popen')
    def test_ok_no_console_attached(self, mock):
        # run() must cope with sys.stdout being absent (detached console).
        instance = mock.return_value
        instance.wait.return_value = 0
        instance.communicate.return_value = (self.output_ok, '')
        mock_stdout = unittest.mock.patch(
            'sys.stdout', new=None)  # Simulate detached console
        with mock_stdout:
            ep = ExecProvider(self.input_ok, None)
            result = ep.run()
            self.assertTrue(isinstance(result, dict))
            self.assertTrue('token' in result)

    @mock.patch('subprocess.Popen')
    def test_with_cluster_info(self, mock):
        # provideClusterInfo puts the cluster into KUBERNETES_EXEC_INFO.
        instance = mock.return_value
        instance.wait.return_value = 0
        instance.communicate.return_value = (self.output_ok, '')
        ep = ExecProvider(self.input_with_cluster, None, {'server': 'name.company.com'})
        result = ep.run()
        self.assertTrue(isinstance(result, dict))
        self.assertTrue('token' in result)

        obj = json.loads(mock.call_args.kwargs['env']['KUBERNETES_EXEC_INFO'])
        self.assertEqual(obj['spec']['cluster']['server'], 'name.company.com')


if __name__ == '__main__':
    unittest.main()
|
121
kubernetes/base/config/incluster_config.py
Normal file
121
kubernetes/base/config/incluster_config.py
Normal file
|
@ -0,0 +1,121 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
|
||||
from kubernetes.client import Configuration
|
||||
|
||||
from .config_exception import ConfigException
|
||||
|
||||
# Environment variables injected into every pod by the kubelet.
SERVICE_HOST_ENV_NAME = "KUBERNETES_SERVICE_HOST"
SERVICE_PORT_ENV_NAME = "KUBERNETES_SERVICE_PORT"
# Default service-account credential paths mounted into pods.
SERVICE_TOKEN_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/token"
SERVICE_CERT_FILENAME = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
|
||||
|
||||
def _join_host_port(host, port):
|
||||
"""Adapted golang's net.JoinHostPort"""
|
||||
template = "%s:%s"
|
||||
host_requires_bracketing = ':' in host or '%' in host
|
||||
if host_requires_bracketing:
|
||||
template = "[%s]:%s"
|
||||
return template % (host, port)
|
||||
|
||||
|
||||
class InClusterConfigLoader(object):
    """Loads client configuration from inside a pod: the service-account
    token/CA files plus the KUBERNETES_SERVICE_* environment variables.
    """

    def __init__(self,
                 token_filename,
                 cert_filename,
                 try_refresh_token=True,
                 environ=os.environ):
        # Paths to the mounted service-account credential files.
        self._token_filename = token_filename
        self._cert_filename = cert_filename
        self._environ = environ
        # When True, install a hook that re-reads the token file once the
        # cached copy is older than _token_refresh_period.
        self._try_refresh_token = try_refresh_token
        self._token_refresh_period = datetime.timedelta(minutes=1)

    def load_and_set(self, client_configuration=None):
        # Apply the in-cluster settings to the given configuration; when
        # none is supplied, configure a fresh Configuration and make it the
        # library-wide default.
        try_set_default = False
        if client_configuration is None:
            # type.__call__ constructs Configuration directly -- presumably
            # to bypass custom metaclass/default machinery on Configuration;
            # TODO confirm against the client library in use.
            client_configuration = type.__call__(Configuration)
            try_set_default = True
        self._load_config()
        self._set_config(client_configuration)
        if try_set_default:
            Configuration.set_default(client_configuration)

    def _load_config(self):
        # Validate env vars and credential files; raises ConfigException
        # for anything missing or empty.
        if (SERVICE_HOST_ENV_NAME not in self._environ
                or SERVICE_PORT_ENV_NAME not in self._environ):
            raise ConfigException("Service host/port is not set.")

        if (not self._environ[SERVICE_HOST_ENV_NAME]
                or not self._environ[SERVICE_PORT_ENV_NAME]):
            raise ConfigException("Service host/port is set but empty.")

        self.host = ("https://" +
                     _join_host_port(self._environ[SERVICE_HOST_ENV_NAME],
                                     self._environ[SERVICE_PORT_ENV_NAME]))

        if not os.path.isfile(self._token_filename):
            raise ConfigException("Service token file does not exist.")

        self._read_token_file()

        if not os.path.isfile(self._cert_filename):
            raise ConfigException(
                "Service certification file does not exist.")

        # The cert stays on disk; only verify it is non-empty here.
        with open(self._cert_filename) as f:
            if not f.read():
                raise ConfigException("Cert file exists but empty.")

        self.ssl_ca_cert = self._cert_filename

    def _set_config(self, client_configuration):
        # Push host/CA/token onto the configuration object.
        client_configuration.host = self.host
        client_configuration.ssl_ca_cert = self.ssl_ca_cert
        if self.token is not None:
            client_configuration.api_key['authorization'] = self.token
        if not self._try_refresh_token:
            return

        def _refresh_api_key(client_configuration):
            # Re-read the token file once the cached token has expired;
            # closes over self so the loader keeps the canonical state.
            if self.token_expires_at <= datetime.datetime.now():
                self._read_token_file()
                self._set_config(client_configuration)

        client_configuration.refresh_api_key_hook = _refresh_api_key

    def _read_token_file(self):
        # Cache the token (with "bearer " prefix) and stamp its expiry one
        # refresh period from now.
        with open(self._token_filename) as f:
            content = f.read()
            if not content:
                raise ConfigException("Token file exists but empty.")
            self.token = "bearer " + content
            self.token_expires_at = datetime.datetime.now(
            ) + self._token_refresh_period
|
||||
|
||||
|
||||
def load_incluster_config(client_configuration=None, try_refresh_token=True):
    """
    Use the service account kubernetes gives to pods to connect to kubernetes
    cluster. It's intended for clients that expect to be running inside a pod
    running on kubernetes. It will raise an exception if called from a process
    not running in a kubernetes environment."""
    loader = InClusterConfigLoader(
        token_filename=SERVICE_TOKEN_FILENAME,
        cert_filename=SERVICE_CERT_FILENAME,
        try_refresh_token=try_refresh_token)
    loader.load_and_set(client_configuration)
|
163
kubernetes/base/config/incluster_config_test.py
Normal file
163
kubernetes/base/config/incluster_config_test.py
Normal file
|
@ -0,0 +1,163 @@
|
|||
# Copyright 2016 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import tempfile
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from kubernetes.client import Configuration
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .incluster_config import (SERVICE_HOST_ENV_NAME, SERVICE_PORT_ENV_NAME,
|
||||
InClusterConfigLoader, _join_host_port)
|
||||
|
||||
# Fixture values shared by the tests below.
_TEST_TOKEN = "temp_token"
_TEST_NEW_TOKEN = "temp_new_token"
_TEST_CERT = "temp_cert"
_TEST_HOST = "127.0.0.1"
_TEST_PORT = "80"
_TEST_HOST_PORT = "127.0.0.1:80"
_TEST_IPV6_HOST = "::1"
_TEST_IPV6_HOST_PORT = "[::1]:80"

# Fake environments mimicking the kubelet-injected service variables.
_TEST_ENVIRON = {
    SERVICE_HOST_ENV_NAME: _TEST_HOST,
    SERVICE_PORT_ENV_NAME: _TEST_PORT
}
_TEST_IPV6_ENVIRON = {
    SERVICE_HOST_ENV_NAME: _TEST_IPV6_HOST,
    SERVICE_PORT_ENV_NAME: _TEST_PORT
}
|
||||
|
||||
|
||||
class InClusterConfigTest(unittest.TestCase):
    """Tests InClusterConfigLoader against temp credential files and fake
    environments."""

    def setUp(self):
        # Temp files created during a test; removed in tearDown.
        self._temp_files = []

    def tearDown(self):
        for f in self._temp_files:
            os.remove(f)

    def _create_file_with_temp_content(self, content=""):
        # Write *content* to a fresh temp file and remember it for cleanup.
        handler, name = tempfile.mkstemp()
        self._temp_files.append(name)
        os.write(handler, str.encode(content))
        os.close(handler)
        return name

    def get_test_loader(self,
                        token_filename=None,
                        cert_filename=None,
                        environ=_TEST_ENVIRON):
        # Build a loader with sane defaults, generating credential files
        # on demand.
        if not token_filename:
            token_filename = self._create_file_with_temp_content(_TEST_TOKEN)
        if not cert_filename:
            cert_filename = self._create_file_with_temp_content(_TEST_CERT)
        return InClusterConfigLoader(token_filename=token_filename,
                                     cert_filename=cert_filename,
                                     try_refresh_token=True,
                                     environ=environ)

    def test_join_host_port(self):
        self.assertEqual(_TEST_HOST_PORT,
                         _join_host_port(_TEST_HOST, _TEST_PORT))
        # IPv6 hosts must come back bracketed.
        self.assertEqual(_TEST_IPV6_HOST_PORT,
                         _join_host_port(_TEST_IPV6_HOST, _TEST_PORT))

    def test_load_config(self):
        cert_filename = self._create_file_with_temp_content(_TEST_CERT)
        loader = self.get_test_loader(cert_filename=cert_filename)
        loader._load_config()
        self.assertEqual("https://" + _TEST_HOST_PORT, loader.host)
        self.assertEqual(cert_filename, loader.ssl_ca_cert)
        self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)

    def test_refresh_token(self):
        loader = self.get_test_loader()
        config = Configuration()
        loader.load_and_set(config)

        self.assertEqual('bearer ' + _TEST_TOKEN,
                         config.get_api_key_with_prefix('authorization'))
        self.assertEqual('bearer ' + _TEST_TOKEN, loader.token)
        self.assertIsNotNone(loader.token_expires_at)

        old_token = loader.token
        old_token_expires_at = loader.token_expires_at
        # Swap the token file; the cached token must be served until expiry.
        loader._token_filename = self._create_file_with_temp_content(
            _TEST_NEW_TOKEN)
        self.assertEqual('bearer ' + _TEST_TOKEN,
                         config.get_api_key_with_prefix('authorization'))

        # Force expiry; the next access must pick up the new token.
        loader.token_expires_at = datetime.datetime.now()
        self.assertEqual('bearer ' + _TEST_NEW_TOKEN,
                         config.get_api_key_with_prefix('authorization'))
        self.assertEqual('bearer ' + _TEST_NEW_TOKEN, loader.token)
        self.assertGreater(loader.token_expires_at, old_token_expires_at)

    def _should_fail_load(self, config_loader, reason):
        # Helper: load_and_set must raise ConfigException.
        try:
            config_loader.load_and_set()
            self.fail("Should fail because %s" % reason)
        except ConfigException:
            # expected
            pass

    def test_no_port(self):
        loader = self.get_test_loader(
            environ={SERVICE_HOST_ENV_NAME: _TEST_HOST})
        self._should_fail_load(loader, "no port specified")

    def test_empty_port(self):
        loader = self.get_test_loader(environ={
            SERVICE_HOST_ENV_NAME: _TEST_HOST,
            SERVICE_PORT_ENV_NAME: ""
        })
        self._should_fail_load(loader, "empty port specified")

    def test_no_host(self):
        loader = self.get_test_loader(
            environ={SERVICE_PORT_ENV_NAME: _TEST_PORT})
        self._should_fail_load(loader, "no host specified")

    def test_empty_host(self):
        loader = self.get_test_loader(environ={
            SERVICE_HOST_ENV_NAME: "",
            SERVICE_PORT_ENV_NAME: _TEST_PORT
        })
        self._should_fail_load(loader, "empty host specified")

    def test_no_cert_file(self):
        loader = self.get_test_loader(cert_filename="not_exists_file_1123")
        self._should_fail_load(loader, "cert file does not exist")

    def test_empty_cert_file(self):
        loader = self.get_test_loader(
            cert_filename=self._create_file_with_temp_content())
        self._should_fail_load(loader, "empty cert file provided")

    def test_no_token_file(self):
        loader = self.get_test_loader(token_filename="not_exists_file_1123")
        self._should_fail_load(loader, "token file does not exist")

    def test_empty_token_file(self):
        loader = self.get_test_loader(
            token_filename=self._create_file_with_temp_content())
        self._should_fail_load(loader, "empty token file provided")


if __name__ == '__main__':
    unittest.main()
|
901
kubernetes/base/config/kube_config.py
Normal file
901
kubernetes/base/config/kube_config.py
Normal file
|
@ -0,0 +1,901 @@
|
|||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import atexit
|
||||
import base64
|
||||
import copy
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
from collections import namedtuple
|
||||
|
||||
import google.auth
|
||||
import google.auth.transport.requests
|
||||
import oauthlib.oauth2
|
||||
import urllib3
|
||||
import yaml
|
||||
from requests_oauthlib import OAuth2Session
|
||||
from six import PY3
|
||||
|
||||
from kubernetes.client import ApiClient, Configuration
|
||||
from kubernetes.config.exec_provider import ExecProvider
|
||||
|
||||
from .config_exception import ConfigException
|
||||
from .dateutil import UTC, format_rfc3339, parse_rfc3339
|
||||
|
||||
try:
|
||||
import adal
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
EXPIRY_SKEW_PREVENTION_DELAY = datetime.timedelta(minutes=5)
|
||||
KUBE_CONFIG_DEFAULT_LOCATION = os.environ.get('KUBECONFIG', '~/.kube/config')
|
||||
ENV_KUBECONFIG_PATH_SEPARATOR = ';' if platform.system() == 'Windows' else ':'
|
||||
_temp_files = {}
|
||||
|
||||
|
||||
def _cleanup_temp_files():
|
||||
global _temp_files
|
||||
for temp_file in _temp_files.values():
|
||||
try:
|
||||
os.remove(temp_file)
|
||||
except OSError:
|
||||
pass
|
||||
_temp_files = {}
|
||||
|
||||
|
||||
def _create_temp_file_with_content(content, temp_file_path=None):
    """Write *content* to a temp file and return the file name.

    Files are cached by content (str of the value) so repeated context
    switches reuse the same file instead of creating new ones. The first
    call registers an atexit hook that removes all created files.
    """
    if not _temp_files:
        atexit.register(_cleanup_temp_files)
    cache_key = str(content)
    cached = _temp_files.get(cache_key)
    if cached is not None:
        return cached
    if temp_file_path and not os.path.isdir(temp_file_path):
        os.makedirs(name=temp_file_path)
    handle, filename = tempfile.mkstemp(dir=temp_file_path)
    os.close(handle)
    _temp_files[cache_key] = filename
    payload = content.encode() if isinstance(content, str) else content
    with open(filename, 'wb') as out:
        out.write(payload)
    return filename
|
||||
|
||||
|
||||
def _is_expired(expiry):
    """Return True when *expiry* falls inside the skew-prevention window."""
    deadline = parse_rfc3339(expiry) - EXPIRY_SKEW_PREVENTION_DELAY
    return deadline <= datetime.datetime.now(tz=UTC)
|
||||
|
||||
|
||||
class FileOrData(object):
    """Utility class to read content of obj[%data_key_name] or file's
    content of obj[%file_key_name] and represent it as file or data.
    Note that the data is preferred. The obj[%file_key_name] will be used iff
    obj['%data_key_name'] is not set or empty. Assumption is file content is
    raw data and data field is base64 string. The assumption can be changed
    with base64_file_content flag. If set to False, the content of the file
    will assumed to be base64 and read as is. The default True value will
    result in base64 encode of the file content after read."""

    def __init__(self, obj, file_key_name, data_key_name=None,
                 file_base_path="", base64_file_content=True,
                 temp_file_path=None):
        # Data key defaults to "<file_key>-data", the kubeconfig convention
        # (e.g. "certificate-authority" / "certificate-authority-data").
        if not data_key_name:
            data_key_name = file_key_name + "-data"
        self._file = None
        self._data = None
        self._base64_file_content = base64_file_content
        self._temp_file_path = temp_file_path
        # A falsy obj means neither file nor data is available; both
        # as_file() and as_data() will then return None.
        if not obj:
            return
        # Inline data wins over a file reference when both keys are present.
        if data_key_name in obj:
            self._data = obj[data_key_name]
        elif file_key_name in obj:
            # File paths are resolved relative to file_base_path.
            self._file = os.path.normpath(
                os.path.join(file_base_path, obj[file_key_name]))

    def as_file(self):
        """If obj[%data_key_name] exists, return name of a file with base64
        decoded obj[%data_key_name] content otherwise obj[%file_key_name]."""
        use_data_if_no_file = not self._file and self._data
        if use_data_if_no_file:
            if self._base64_file_content:
                # standard_b64decode needs bytes; encode str data first.
                if isinstance(self._data, str):
                    content = self._data.encode()
                else:
                    content = self._data
                self._file = _create_temp_file_with_content(
                    base64.standard_b64decode(content), self._temp_file_path)
            else:
                self._file = _create_temp_file_with_content(
                    self._data, self._temp_file_path)
        if self._file and not os.path.isfile(self._file):
            raise ConfigException("File does not exist: %s" % self._file)
        # May be None when neither data nor a file reference was supplied.
        return self._file

    def as_data(self):
        """If obj[%data_key_name] exists, Return obj[%data_key_name] otherwise
        base64 encoded string of obj[%file_key_name] file content."""
        use_file_if_no_data = not self._data and self._file
        if use_file_if_no_data:
            with open(self._file) as f:
                if self._base64_file_content:
                    # Raw file content is base64-encoded to match the shape
                    # of inline "-data" kubeconfig fields.
                    self._data = bytes.decode(
                        base64.standard_b64encode(str.encode(f.read())))
                else:
                    self._data = f.read()
        # May be None when neither data nor a file reference was supplied.
        return self._data
|
||||
|
||||
|
||||
class CommandTokenSource(object):
    """Obtains a bearer token by running an external command (the
    kubeconfig gcp auth-provider's ``cmd-path``/``cmd-args`` mechanism)
    and parsing its JSON output."""

    def __init__(self, cmd, args, tokenKey, expiryKey):
        # cmd: executable to run; args: list of argument strings.
        self._cmd = cmd
        self._args = args
        # JSONPath-style keys for locating token/expiry in the command
        # output; fall back to the gcloud defaults when unset.
        if not tokenKey:
            self._tokenKey = '{.access_token}'
        else:
            self._tokenKey = tokenKey
        if not expiryKey:
            self._expiryKey = '{.token_expiry}'
        else:
            self._expiryKey = expiryKey

    def token(self):
        """Run the command and return a namedtuple with ``token`` and
        ``expiry`` fields parsed from its JSON stdout.

        Raises ConfigException on a non-zero exit code or unparseable
        output.
        """
        # Human-readable command line, used only in error messages below.
        fullCmd = self._cmd + (" ") + " ".join(self._args)
        process = subprocess.Popen(
            [self._cmd] + self._args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True)
        (stdout, stderr) = process.communicate()
        exit_code = process.wait()
        if exit_code != 0:
            msg = 'cmd-path: process returned %d' % exit_code
            msg += "\nCmd: %s" % fullCmd
            stderr = stderr.strip()
            if stderr:
                msg += '\nStderr: %s' % stderr
            raise ConfigException(msg)
        try:
            data = json.loads(stdout)
        except ValueError as de:
            raise ConfigException(
                'exec: failed to decode process output: %s' % de)
        A = namedtuple('A', ['token', 'expiry'])
        # NOTE(review): output is read from the fixed 'credential' keys
        # rather than self._tokenKey/_expiryKey; the configured keys are
        # stored but not applied here.
        return A(
            token=data['credential']['access_token'],
            expiry=parse_rfc3339(data['credential']['token_expiry']))
|
||||
|
||||
|
||||
class KubeConfigLoader(object):
    """Loads cluster and authentication settings from a parsed kubeconfig
    (dict or ConfigNode) and applies them to a client Configuration via
    :meth:`load_and_set`."""

    def __init__(self, config_dict, active_context=None,
                 get_google_credentials=None,
                 config_base_path="",
                 config_persister=None,
                 temp_file_path=None):

        if config_dict is None:
            raise ConfigException(
                'Invalid kube-config. '
                'Expected config_dict to not be None.')
        elif isinstance(config_dict, ConfigNode):
            self._config = config_dict
        else:
            self._config = ConfigNode('kube-config', config_dict)

        self._current_context = None
        self._user = None
        self._cluster = None
        self.set_active_context(active_context)
        self._config_base_path = config_base_path
        # Callable invoked after token refreshes so changes can be written
        # back to the kubeconfig file(s); may be None.
        self._config_persister = config_persister
        self._temp_file_path = temp_file_path

        def _refresh_credentials_with_cmd_path():
            # Run the user-configured command (gcp auth-provider cmd-path)
            # to obtain a fresh token. Closure over `self` from __init__.
            config = self._user['auth-provider']['config']
            cmd = config['cmd-path']
            if len(cmd) == 0:
                raise ConfigException(
                    'missing access token cmd '
                    '(cmd-path is an empty string in your kubeconfig file)')
            if 'scopes' in config and config['scopes'] != "":
                raise ConfigException(
                    'scopes can only be used '
                    'when kubectl is using a gcp service account key')
            args = []
            if 'cmd-args' in config:
                args = config['cmd-args'].split()
            else:
                # No explicit args: treat cmd-path as a full command line.
                fields = config['cmd-path'].split()
                cmd = fields[0]
                args = fields[1:]

            commandTokenSource = CommandTokenSource(
                cmd, args,
                config.safe_get('token-key'),
                config.safe_get('expiry-key'))
            return commandTokenSource.token()

        def _refresh_credentials():
            # Refresh credentials using cmd-path
            if ('auth-provider' in self._user and
                    'config' in self._user['auth-provider'] and
                    'cmd-path' in self._user['auth-provider']['config']):
                return _refresh_credentials_with_cmd_path()

            # Otherwise fall back to Application Default Credentials.
            credentials, project_id = google.auth.default(scopes=[
                'https://www.googleapis.com/auth/cloud-platform',
                'https://www.googleapis.com/auth/userinfo.email'
            ])
            request = google.auth.transport.requests.Request()
            credentials.refresh(request)
            return credentials

        # Callers (and tests) may inject their own credential fetcher.
        if get_google_credentials:
            self._get_google_credentials = get_google_credentials
        else:
            self._get_google_credentials = _refresh_credentials

    def set_active_context(self, context_name=None):
        """Select the context to load; defaults to 'current-context'.

        Populates self._current_context, self._user (may stay None) and
        self._cluster.
        """
        if context_name is None:
            context_name = self._config['current-context']
        self._current_context = self._config['contexts'].get_with_name(
            context_name)
        if (self._current_context['context'].safe_get('user') and
                self._config.safe_get('users')):
            user = self._config['users'].get_with_name(
                self._current_context['context']['user'], safe=True)
            if user:
                self._user = user['user']
            else:
                self._user = None
        else:
            self._user = None
        self._cluster = self._config['clusters'].get_with_name(
            self._current_context['context']['cluster'])['cluster']

    def _load_authentication(self):
        """Read authentication from kube-config user section if exists.

        This function goes through various authentication methods in user
        section of kube-config and stops if it finds a valid authentication
        method. The order of authentication methods is:

            1. auth-provider (gcp, azure, oidc)
            2. token field (point to a token file)
            3. exec provided plugin
            4. username/password
        """
        if not self._user:
            return
        if self._load_auth_provider_token():
            return
        if self._load_user_token():
            return
        if self._load_from_exec_plugin():
            return
        self._load_user_pass_token()

    def _load_auth_provider_token(self):
        """Dispatch to the configured auth-provider; returns the token or
        None when no provider applies."""
        if 'auth-provider' not in self._user:
            return
        provider = self._user['auth-provider']
        if 'name' not in provider:
            return
        if provider['name'] == 'gcp':
            return self._load_gcp_token(provider)
        if provider['name'] == 'azure':
            return self._load_azure_token(provider)
        if provider['name'] == 'oidc':
            return self._load_oid_token(provider)

    def _azure_is_expired(self, provider):
        """Return True if the azure token's 'expires-on' value is past.

        'expires-on' may be either an epoch-seconds string or a formatted
        timestamp.
        """
        expires_on = provider['config']['expires-on']
        if expires_on.isdigit():
            return int(expires_on) < time.time()
        else:
            # NOTE(review): strptime yields a naive struct_time compared
            # against gmtime(); assumes the stored timestamp is UTC.
            exp_time = time.strptime(expires_on, '%Y-%m-%d %H:%M:%S.%f')
            return exp_time < time.gmtime()

    def _load_azure_token(self, provider):
        """Load (refreshing if expired) the azure access token into
        self.token; returns it, or None if the provider config is
        incomplete."""
        if 'config' not in provider:
            return
        if 'access-token' not in provider['config']:
            return
        if 'expires-on' in provider['config']:
            if self._azure_is_expired(provider):
                self._refresh_azure_token(provider['config'])
        self.token = 'Bearer %s' % provider['config']['access-token']
        return self.token

    def _refresh_azure_token(self, config):
        """Use adal and the stored refresh token to obtain a new azure
        access token, updating the kubeconfig in place."""
        # adal is an optional dependency (see guarded import at file top).
        if 'adal' not in globals():
            raise ImportError('refresh token error, adal library not imported')

        tenant = config['tenant-id']
        authority = 'https://login.microsoftonline.com/{}'.format(tenant)
        context = adal.AuthenticationContext(
            authority, validate_authority=True, api_version='1.0'
        )
        refresh_token = config['refresh-token']
        client_id = config['client-id']
        apiserver_id = '00000002-0000-0000-c000-000000000000'
        try:
            apiserver_id = config['apiserver-id']
        except ConfigException:
            # We've already set a default above
            pass
        token_response = context.acquire_token_with_refresh_token(
            refresh_token, client_id, apiserver_id)

        # Write the refreshed token back into the underlying dict so the
        # persister can save it.
        provider = self._user['auth-provider']['config']
        provider.value['access-token'] = token_response['accessToken']
        provider.value['expires-on'] = token_response['expiresOn']
        if self._config_persister:
            self._config_persister()

    def _load_gcp_token(self, provider):
        """Load (refreshing if missing/expired) the gcp access token into
        self.token and return it."""
        if (('config' not in provider) or
                ('access-token' not in provider['config']) or
                ('expiry' in provider['config'] and
                 _is_expired(provider['config']['expiry']))):
            # token is not available or expired, refresh it
            self._refresh_gcp_token()

        self.token = "Bearer %s" % provider['config']['access-token']
        if 'expiry' in provider['config']:
            self.expiry = parse_rfc3339(provider['config']['expiry'])
        return self.token

    def _refresh_gcp_token(self):
        """Fetch fresh google credentials and store token/expiry back into
        the kubeconfig's auth-provider config."""
        if 'config' not in self._user['auth-provider']:
            self._user['auth-provider'].value['config'] = {}
        provider = self._user['auth-provider']['config']
        credentials = self._get_google_credentials()
        provider.value['access-token'] = credentials.token
        provider.value['expiry'] = format_rfc3339(credentials.expiry)
        if self._config_persister:
            self._config_persister()

    def _load_oid_token(self, provider):
        """Load the OIDC id-token into self.token, refreshing it via the
        issuer when the JWT's 'exp' claim is past; returns the token, or
        None when the stored value is not a well-formed JWT."""
        if 'config' not in provider:
            return

        reserved_characters = frozenset(["=", "+", "/"])
        token = provider['config']['id-token']

        if any(char in token for char in reserved_characters):
            # Invalid jwt, as it contains url-unsafe chars
            return

        parts = token.split('.')
        if len(parts) != 3:  # Not a valid JWT
            return

        # Pad the payload segment to a multiple of 4 for base64 decoding.
        padding = (4 - len(parts[1]) % 4) * '='
        if len(padding) == 3:
            # According to spec, 3 padding characters cannot occur
            # in a valid jwt
            # https://tools.ietf.org/html/rfc7515#appendix-C
            return

        if PY3:
            jwt_attributes = json.loads(
                base64.urlsafe_b64decode(parts[1] + padding).decode('utf-8')
            )
        else:
            jwt_attributes = json.loads(
                base64.b64decode(parts[1] + padding)
            )

        expire = jwt_attributes.get('exp')

        if ((expire is not None) and
                (_is_expired(datetime.datetime.fromtimestamp(expire,
                                                             tz=UTC)))):
            self._refresh_oidc(provider)

            if self._config_persister:
                self._config_persister()

        self.token = "Bearer %s" % provider['config']['id-token']

        return self.token

    def _refresh_oidc(self, provider):
        """Refresh the OIDC id-token/refresh-token pair against the
        issuer's token endpoint, updating the kubeconfig in place.

        Returns silently (leaving tokens unchanged) when the issuer's
        discovery document cannot be fetched or the client id is invalid.
        """
        config = Configuration()

        # Verification material for the issuer, in order of preference:
        # inline CA data, CA file path, or no verification at all.
        if 'idp-certificate-authority-data' in provider['config']:
            ca_cert = tempfile.NamedTemporaryFile(delete=True)

            if PY3:
                cert = base64.b64decode(
                    provider['config']['idp-certificate-authority-data']
                ).decode('utf-8')
            else:
                cert = base64.b64decode(
                    provider['config']['idp-certificate-authority-data'] + "=="
                )

            with open(ca_cert.name, 'w') as fh:
                fh.write(cert)

            config.ssl_ca_cert = ca_cert.name

        elif 'idp-certificate-authority' in provider['config']:
            config.ssl_ca_cert = provider['config']['idp-certificate-authority']

        else:
            config.verify_ssl = False

        client = ApiClient(configuration=config)

        # OIDC discovery: locate the token endpoint for this issuer.
        response = client.request(
            method="GET",
            url="%s/.well-known/openid-configuration"
            % provider['config']['idp-issuer-url']
        )

        if response.status != 200:
            return

        response = json.loads(response.data)

        request = OAuth2Session(
            client_id=provider['config']['client-id'],
            token=provider['config']['refresh-token'],
            auto_refresh_kwargs={
                'client_id': provider['config']['client-id'],
                'client_secret': provider['config']['client-secret']
            },
            auto_refresh_url=response['token_endpoint']
        )

        try:
            refresh = request.refresh_token(
                token_url=response['token_endpoint'],
                refresh_token=provider['config']['refresh-token'],
                auth=(provider['config']['client-id'],
                      provider['config']['client-secret']),
                verify=config.ssl_ca_cert if config.verify_ssl else None
            )
        except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError:
            return

        provider['config'].value['id-token'] = refresh['id_token']
        provider['config'].value['refresh-token'] = refresh['refresh_token']

    def _load_from_exec_plugin(self):
        """Run the user's exec credential plugin and apply its output
        (token or client cert/key pair); returns True on success, None on
        failure (errors are logged, not raised)."""
        if 'exec' not in self._user:
            return
        try:
            base_path = self._get_base_path(self._cluster.path)
            status = ExecProvider(self._user['exec'], base_path, self._cluster).run()
            if 'token' in status:
                self.token = "Bearer %s" % status['token']
            elif 'clientCertificateData' in status:
                # https://kubernetes.io/docs/reference/access-authn-authz/authentication/#input-and-output-formats
                # Plugin has provided certificates instead of a token.
                if 'clientKeyData' not in status:
                    logging.error('exec: missing clientKeyData field in '
                                  'plugin output')
                    return None
                self.cert_file = FileOrData(
                    status, None,
                    data_key_name='clientCertificateData',
                    file_base_path=base_path,
                    base64_file_content=False,
                    temp_file_path=self._temp_file_path).as_file()
                self.key_file = FileOrData(
                    status, None,
                    data_key_name='clientKeyData',
                    file_base_path=base_path,
                    base64_file_content=False,
                    temp_file_path=self._temp_file_path).as_file()
            else:
                logging.error('exec: missing token or clientCertificateData '
                              'field in plugin output')
                return None
            if 'expirationTimestamp' in status:
                self.expiry = parse_rfc3339(status['expirationTimestamp'])
            return True
        except Exception as e:
            logging.error(str(e))

    def _load_user_token(self):
        """Load a bearer token from the user's 'token' or 'tokenFile'
        entry; returns True when a token was found."""
        base_path = self._get_base_path(self._user.path)
        token = FileOrData(
            self._user, 'tokenFile', 'token',
            file_base_path=base_path,
            base64_file_content=False,
            temp_file_path=self._temp_file_path).as_data()
        if token:
            self.token = "Bearer %s" % token
            return True

    def _load_user_pass_token(self):
        """Build an HTTP basic-auth header from username/password; returns
        True when both are present."""
        if 'username' in self._user and 'password' in self._user:
            self.token = urllib3.util.make_headers(
                basic_auth=(self._user['username'] + ':' +
                            self._user['password'])).get('authorization')
            return True

    def _get_base_path(self, config_path):
        """Directory against which relative cert/key paths are resolved:
        the explicit config_base_path if set, else the kubeconfig file's
        own directory, else the cwd-relative empty string."""
        if self._config_base_path is not None:
            return self._config_base_path
        if config_path is not None:
            return os.path.abspath(os.path.dirname(config_path))
        return ""

    def _load_cluster_info(self):
        """Populate host/TLS attributes from the active cluster entry."""
        if 'server' in self._cluster:
            self.host = self._cluster['server'].rstrip('/')
            if self.host.startswith("https"):
                base_path = self._get_base_path(self._cluster.path)
                self.ssl_ca_cert = FileOrData(
                    self._cluster, 'certificate-authority',
                    file_base_path=base_path,
                    temp_file_path=self._temp_file_path).as_file()
                if 'cert_file' not in self.__dict__:
                    # cert_file could have been provided by
                    # _load_from_exec_plugin; only load from the _user
                    # section if we need it.
                    self.cert_file = FileOrData(
                        self._user, 'client-certificate',
                        file_base_path=base_path,
                        temp_file_path=self._temp_file_path).as_file()
                    self.key_file = FileOrData(
                        self._user, 'client-key',
                        file_base_path=base_path,
                        temp_file_path=self._temp_file_path).as_file()
        if 'insecure-skip-tls-verify' in self._cluster:
            self.verify_ssl = not self._cluster['insecure-skip-tls-verify']
        if 'tls-server-name' in self._cluster:
            self.tls_server_name = self._cluster['tls-server-name']

    def _set_config(self, client_configuration):
        """Copy the attributes gathered on self onto the given client
        Configuration, installing a refresh hook for expiring tokens."""
        if 'token' in self.__dict__:
            client_configuration.api_key['authorization'] = self.token

            def _refresh_api_key(client_configuration):
                # Re-run authentication only when the token has expired.
                if ('expiry' in self.__dict__ and _is_expired(self.expiry)):
                    self._load_authentication()
                self._set_config(client_configuration)
            client_configuration.refresh_api_key_hook = _refresh_api_key
        # copy these keys directly from self to configuration object
        keys = ['host', 'ssl_ca_cert', 'cert_file', 'key_file', 'verify_ssl','tls_server_name']
        for key in keys:
            if key in self.__dict__:
                setattr(client_configuration, key, getattr(self, key))

    def load_and_set(self, client_configuration):
        """Load authentication + cluster info and apply both to
        client_configuration."""
        self._load_authentication()
        self._load_cluster_info()
        self._set_config(client_configuration)

    def list_contexts(self):
        # Unwrap ConfigNode entries back to plain dicts for callers.
        return [context.value for context in self._config['contexts']]

    @property
    def current_context(self):
        # Plain-dict view of the active context entry.
        return self._current_context.value
|
||||
|
||||
|
||||
class ConfigNode(object):
    """Remembers each config key's path and construct a relevant exception
    message in case of missing keys. The assumption is all access keys are
    present in a well-formed kube-config."""

    def __init__(self, name, value, path=None):
        # name: human-readable location (used in error messages);
        # value: the wrapped dict/list/scalar;
        # path: kubeconfig file this node came from (for relative paths).
        self.name = name
        self.value = value
        self.path = path

    def __contains__(self, key):
        return key in self.value

    def __len__(self):
        return len(self.value)

    def safe_get(self, key):
        """Like __getitem__ but returns None (the implicit fall-through)
        instead of raising when the key is absent."""
        if (isinstance(self.value, list) and isinstance(key, int) or
                key in self.value):
            return self.value[key]

    def __getitem__(self, key):
        """Index into the wrapped value; dict/list results are re-wrapped
        in ConfigNode (propagating path), scalars returned as-is.

        Raises ConfigException naming the missing key and this node.
        """
        v = self.safe_get(key)
        if v is None:
            raise ConfigException(
                'Invalid kube-config file. Expected key %s in %s'
                % (key, self.name))
        if isinstance(v, dict) or isinstance(v, list):
            return ConfigNode('%s/%s' % (self.name, key), v, self.path)
        else:
            return v

    def get_with_name(self, name, safe=False):
        """Find the single entry whose 'name' field equals *name* in a
        list-valued node, wrapped as a ConfigNode.

        With safe=True a missing entry yields None; otherwise it raises.
        Duplicate names always raise.
        """
        if not isinstance(self.value, list):
            raise ConfigException(
                'Invalid kube-config file. Expected %s to be a list'
                % self.name)
        result = None
        for v in self.value:
            if 'name' not in v:
                raise ConfigException(
                    'Invalid kube-config file. '
                    'Expected all values in %s list to have \'name\' key'
                    % self.name)
            if v['name'] == name:
                if result is None:
                    result = v
                else:
                    raise ConfigException(
                        'Invalid kube-config file. '
                        'Expected only one object with name %s in %s list'
                        % (name, self.name))
        if result is not None:
            # Entries merged from files are already ConfigNodes; plain
            # dict entries get wrapped here.
            if isinstance(result, ConfigNode):
                return result
            else:
                return ConfigNode(
                    '%s[name=%s]' %
                    (self.name, name), result, self.path)
        if safe:
            return None
        raise ConfigException(
            'Invalid kube-config file. '
            'Expected object with name %s in %s list' % (name, self.name))
|
||||
|
||||
|
||||
class KubeConfigMerger:

    """Reads and merges configuration from one or more kube-config's.
    The property `config` can be passed to the KubeConfigLoader as config_dict.

    It uses a path attribute from ConfigNode to store the path to kubeconfig.
    This path is required to load certs from relative paths.

    A method `save_changes` updates changed kubeconfig's (it compares current
    state of dicts with).
    """

    def __init__(self, paths):
        # paths: either a file-like object or a path string (possibly
        # several paths joined with ENV_KUBECONFIG_PATH_SEPARATOR).
        self.paths = []
        self.config_files = {}
        self.config_merged = None
        if hasattr(paths, 'read'):
            self._load_config_from_file_like_object(paths)
        else:
            self._load_config_from_file_path(paths)

    @property
    def config(self):
        # Merged view; None when no config could be loaded.
        return self.config_merged

    def _load_config_from_file_like_object(self, string):
        """Load a single config from a file-like object (StringIO or an
        open file); no multi-file merging applies here."""
        if hasattr(string, 'getvalue'):
            config = yaml.safe_load(string.getvalue())
        else:
            config = yaml.safe_load(string.read())

        if config is None:
            raise ConfigException(
                'Invalid kube-config.')
        if self.config_merged is None:
            self.config_merged = copy.deepcopy(config)
        # doesn't need to do any further merging

    def _load_config_from_file_path(self, string):
        """Load and merge every existing path in the separator-joined
        path list, then snapshot the loaded files for change detection."""
        for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR):
            if path:
                path = os.path.expanduser(path)
                if os.path.exists(path):
                    self.paths.append(path)
                    self.load_config(path)
        # Baseline snapshot; save_changes diffs against this.
        self.config_saved = copy.deepcopy(self.config_files)

    def load_config(self, path):
        """Parse one kubeconfig file and merge its named lists into the
        accumulated config; earlier files win on name collisions."""
        with open(path) as f:
            config = yaml.safe_load(f)

        if config is None:
            raise ConfigException(
                'Invalid kube-config. '
                '%s file is empty' % path)

        if self.config_merged is None:
            # First file: take its top-level structure, with the named
            # lists emptied so all entries flow through _merge below.
            config_merged = copy.deepcopy(config)
            for item in ('clusters', 'contexts', 'users'):
                config_merged[item] = []
            self.config_merged = ConfigNode(path, config_merged, path)
        for item in ('clusters', 'contexts', 'users'):
            self._merge(item, config.get(item, []) or [], path)

        if 'current-context' in config:
            # Last file read wins for the active context.
            self.config_merged.value['current-context'] = config['current-context']

        self.config_files[path] = config

    def _merge(self, item, add_cfg, path):
        """Append entries from add_cfg into the merged *item* list,
        skipping names that are already present (first file wins)."""
        for new_item in add_cfg:
            for exists in self.config_merged.value[item]:
                if exists['name'] == new_item['name']:
                    break
            else:
                # Wrap with the source path so relative file references
                # inside the entry can be resolved later.
                self.config_merged.value[item].append(ConfigNode(
                    '{}/{}'.format(path, new_item), new_item, path))

    def save_changes(self):
        """Write back every loaded file whose dict changed since the last
        snapshot, then refresh the snapshot."""
        for path in self.paths:
            if self.config_saved[path] != self.config_files[path]:
                self.save_config(path)
        self.config_saved = copy.deepcopy(self.config_files)

    def save_config(self, path):
        """Serialize the in-memory dict for *path* back to disk as YAML."""
        with open(path, 'w') as f:
            yaml.safe_dump(self.config_files[path], f,
                           default_flow_style=False)
|
||||
|
||||
|
||||
# Backward-compatible wrapper kept for callers of the old name; simply
# forwards to _get_kube_config_loader.
def _get_kube_config_loader_for_yaml_file(
        filename, persist_config=False, **kwargs):
    return _get_kube_config_loader(
        filename=filename,
        persist_config=persist_config,
        **kwargs)
|
||||
|
||||
|
||||
def _get_kube_config_loader(
        filename=None,
        config_dict=None,
        persist_config=False,
        **kwargs):
    """Build a KubeConfigLoader either from an in-memory dict or from one
    or more kubeconfig files.

    :param filename: kubeconfig path(s); used only when config_dict is None.
    :param config_dict: already-parsed kubeconfig contents.
    :param persist_config: when loading from files, write token refreshes
        back to disk via the merger's save_changes.
    :raises ConfigException: if the file(s) yield no configuration.
    """
    # An explicit dict takes precedence: no file merging or persistence.
    if config_dict is not None:
        return KubeConfigLoader(
            config_dict=config_dict,
            config_base_path=None,
            **kwargs)

    merger = KubeConfigMerger(filename)
    if persist_config and 'config_persister' not in kwargs:
        kwargs['config_persister'] = merger.save_changes

    if merger.config is None:
        raise ConfigException(
            'Invalid kube-config file. '
            'No configuration found.')
    return KubeConfigLoader(
        config_dict=merger.config,
        config_base_path=None,
        **kwargs)
|
||||
|
||||
|
||||
def list_kube_config_contexts(config_file=None):
    """Return (all contexts, active context) from a kubeconfig file.

    :param config_file: kubeconfig path; defaults to
        KUBE_CONFIG_DEFAULT_LOCATION when None.
    """
    if config_file is None:
        config_file = KUBE_CONFIG_DEFAULT_LOCATION
    loader = _get_kube_config_loader(filename=config_file)
    contexts = loader.list_contexts()
    return contexts, loader.current_context
|
||||
|
||||
|
||||
def load_kube_config(config_file=None, context=None,
                     client_configuration=None,
                     persist_config=True,
                     temp_file_path=None):
    """Loads authentication and cluster information from kube-config file
    and stores them in kubernetes.client.configuration.

    :param config_file: Name of the kube-config file.
    :param context: set the active context. If is set to None, current_context
        from config file will be used.
    :param client_configuration: The kubernetes.client.Configuration to
        set configs to.
    :param persist_config: If True, config file will be updated when changed
        (e.g GCP token refresh).
    :param temp_file_path: store temp files path.
    """
    loader = _get_kube_config_loader(
        filename=(KUBE_CONFIG_DEFAULT_LOCATION
                  if config_file is None else config_file),
        active_context=context,
        persist_config=persist_config,
        temp_file_path=temp_file_path)

    if client_configuration is not None:
        loader.load_and_set(client_configuration)
        return

    # No target supplied: populate a fresh Configuration (constructed via
    # type.__call__, as elsewhere in this module) and install it as the
    # process-wide default.
    default_config = type.__call__(Configuration)
    loader.load_and_set(default_config)
    Configuration.set_default(default_config)
|
||||
|
||||
|
||||
def load_kube_config_from_dict(config_dict, context=None,
                               client_configuration=None,
                               persist_config=True,
                               temp_file_path=None):
    """Loads authentication and cluster information from config_dict file
    and stores them in kubernetes.client.configuration.

    :param config_dict: Takes the config file as a dict.
    :param context: set the active context. If is set to None, current_context
        from config file will be used.
    :param client_configuration: The kubernetes.client.Configuration to
        set configs to.
    :param persist_config: If True, config file will be updated when changed
        (e.g GCP token refresh).
    :param temp_file_path: store temp files path.
    """
    if config_dict is None:
        raise ConfigException(
            'Invalid kube-config dict. '
            'No configuration found.')

    loader = _get_kube_config_loader(
        config_dict=config_dict, active_context=context,
        persist_config=persist_config,
        temp_file_path=temp_file_path)

    if client_configuration is not None:
        loader.load_and_set(client_configuration)
        return

    # No target supplied: populate a fresh Configuration and install it
    # as the process-wide default.
    default_config = type.__call__(Configuration)
    loader.load_and_set(default_config)
    Configuration.set_default(default_config)
|
||||
|
||||
|
||||
def new_client_from_config(
        config_file=None,
        context=None,
        persist_config=True,
        client_configuration=None):
    """
    Loads configuration the same as load_kube_config but returns an ApiClient
    to be used with any API object. This will allow the caller to concurrently
    talk with multiple clusters.
    """
    target = client_configuration
    if target is None:
        target = type.__call__(Configuration)
    load_kube_config(config_file=config_file,
                     context=context,
                     client_configuration=target,
                     persist_config=persist_config)
    return ApiClient(configuration=target)
|
||||
|
||||
|
||||
def new_client_from_config_dict(
        config_dict=None,
        context=None,
        persist_config=True,
        temp_file_path=None,
        client_configuration=None):
    """
    Loads configuration the same as load_kube_config_from_dict but returns an ApiClient
    to be used with any API object. This will allow the caller to concurrently
    talk with multiple clusters.
    """
    target = client_configuration
    if target is None:
        target = type.__call__(Configuration)
    load_kube_config_from_dict(config_dict=config_dict,
                               context=context,
                               client_configuration=target,
                               persist_config=persist_config,
                               temp_file_path=temp_file_path)
    return ApiClient(configuration=target)
|
1915
kubernetes/base/config/kube_config_test.py
Normal file
1915
kubernetes/base/config/kube_config_test.py
Normal file
File diff suppressed because it is too large
Load diff
Reference in a new issue