Return proper error (#312)
* Return proper error

* Avoid redefining err. Add an integration test for updating

* Do not use temporary files

* Wait until the updated pod is ready

* Fix condition

* Increase timeout and debug info

* Increase debug

* Wait in two steps

* Change matching expression

* Remove debug code

* Increase timeout

* Add debug

* Redirect debug info

* Change approach

* Return to initial state

* Move update test

* Fix RBAC role permissions. Restore order and approach

* Remove unnecessary code
Authored by andresmgot, committed by sebgoa on Sep 26, 2017
Parent: acb60a1 · Commit: 6bbb7d1
Showing 5 changed files with 40 additions and 10 deletions.
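
The thread running through the Go changes below is a classic shadowing pitfall: inside the if err != nil && k8sErrors.IsAlreadyExists(err) branch, data, err := json.Marshal(...) re-declared err with :=, so the result of the follow-up Patch call was assigned to a block-scoped copy and thrown away, and the function returned the stale AlreadyExists error from Create instead. Here is a minimal, self-contained sketch of the bug and the fix; it assumes nothing from the kubeless codebase (buggyEnsure, fixedEnsure, and errAlreadyExists are illustrative names, not real identifiers from this repo):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

var errAlreadyExists = errors.New("already exists")

// buggyEnsure mirrors the pre-fix pattern: ":=" inside the branch
// declares a new err that shadows the function-scoped one, so the
// patch outcome is discarded at the closing brace and the stale
// "already exists" error from the create step is what gets returned.
func buggyEnsure(obj interface{}, patch func([]byte) error) error {
	err := errAlreadyExists // pretend Create() reported AlreadyExists
	if err != nil {
		data, err := json.Marshal(obj) // shadows the outer err
		if err != nil {
			return err
		}
		err = patch(data) // assigns the shadow, lost at the closing brace
	}
	return err // still errAlreadyExists, even if patch succeeded
}

// fixedEnsure is the post-fix pattern: declare data up front and assign
// with "=", so the single function-scoped err carries the real outcome.
func fixedEnsure(obj interface{}, patch func([]byte) error) error {
	err := errAlreadyExists
	if err != nil {
		var data []byte
		data, err = json.Marshal(obj) // assigns the outer err
		if err != nil {
			return err
		}
		err = patch(data)
	}
	return err // nil on a successful patch, or the patch error
}

func main() {
	ok := func([]byte) error { return nil }
	fmt.Println(buggyEnsure(struct{}{}, ok)) // "already exists" (wrong)
	fmt.Println(fixedEnsure(struct{}{}, ok)) // <nil> (right)
}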
7 changes: 7 additions & 0 deletions examples/Makefile
@@ -5,6 +5,13 @@ get-python:
 get-python-verify:
 	kubeless function call get-python |egrep hello.world
 
+get-python-update:
+	printf 'def foo():\n%4sreturn "hello world updated"\n' | kubeless function update get-python --runtime python2.7 --handler helloget.foo --from-file /dev/stdin
+	echo "curl localhost:8080/api/v1/proxy/namespaces/default/services/get-python/"
+
+get-python-update-verify:
+	kubeless function call get-python |egrep hello.world.updated
+
 get-python-34:
 	kubeless function deploy get-python --trigger-http --runtime python3.4 --handler helloget.foo --from-file python/helloget.py
 	echo "curl localhost:8080/api/v1/proxy/namespaces/default/services/get-python/"
2 changes: 1 addition & 1 deletion kubeless-rbac.jsonnet
@@ -7,7 +7,7 @@ local kubeless = import "kubeless.jsonnet";
 local controller_account = kubeless.controller_account;
 local controller_roles = [{
   apiGroups: ["*"],
-  resources: ["services", "deployments", "functions", "configmaps"],
+  resources: ["services", "deployments", "functions", "configmaps", "pods", "replicasets"],
   verbs: ["*"]
 }];
 
22 changes: 13 additions & 9 deletions pkg/utils/k8sutil.go
@@ -776,7 +776,8 @@ func ensureFuncConfigMap(client kubernetes.Interface, funcObj *spec.Function, or
 
 	_, err = client.Core().ConfigMaps(funcObj.Metadata.Namespace).Create(configMap)
 	if err != nil && k8sErrors.IsAlreadyExists(err) {
-		data, err := json.Marshal(configMap)
+		var data []byte
+		data, err = json.Marshal(configMap)
 		if err != nil {
 			return err
 		}
@@ -807,12 +808,12 @@ func ensureFuncService(client kubernetes.Interface, funcObj *spec.Function, or [
 	}
 	_, err := client.Core().Services(funcObj.Metadata.Namespace).Create(svc)
 	if err != nil && k8sErrors.IsAlreadyExists(err) {
-		data, err := json.Marshal(svc)
+		var data []byte
+		data, err = json.Marshal(svc)
 		if err != nil {
 			return err
 		}
 		_, err = client.Core().Services(funcObj.Metadata.Namespace).Patch(svc.Name, types.StrategicMergePatchType, data)
-
 	}
 	return err
 }
@@ -959,7 +960,8 @@ func ensureFuncDeployment(client kubernetes.Interface, funcObj *spec.Function, o
 
 	_, err = client.Extensions().Deployments(funcObj.Metadata.Namespace).Create(dpm)
 	if err != nil && k8sErrors.IsAlreadyExists(err) {
-		data, err := json.Marshal(dpm)
+		var data []byte
+		data, err = json.Marshal(dpm)
 		if err != nil {
 			return err
 		}
@@ -971,7 +973,11 @@
 		// kick existing function pods then it will be recreated
 		// with the new data mount from updated configmap.
 		// TODO: This is a workaround. Do something better.
-		pods, err := GetPodsByLabel(client, funcObj.Metadata.Namespace, "function", funcObj.Metadata.Name)
+		var pods *v1.PodList
+		pods, err = GetPodsByLabel(client, funcObj.Metadata.Namespace, "function", funcObj.Metadata.Name)
+		if err != nil {
+			return err
+		}
 		for _, pod := range pods.Items {
 			err = client.Core().Pods(funcObj.Metadata.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
 			if err != nil && !k8sErrors.IsNotFound(err) {
@@ -1014,14 +1020,12 @@ func ensureFuncJob(client kubernetes.Interface, funcObj *spec.Function, or []met
 
 	_, err := client.BatchV2alpha1().CronJobs(funcObj.Metadata.Namespace).Create(job)
 	if err != nil && k8sErrors.IsAlreadyExists(err) {
-		data, err := json.Marshal(job)
+		var data []byte
+		data, err = json.Marshal(job)
 		if err != nil {
 			return err
 		}
 		_, err = client.BatchV2alpha1().CronJobs(funcObj.Metadata.Namespace).Patch(job.Name, types.StrategicMergePatchType, data)
-		if err != nil {
-			return err
-		}
 	}
 
 	return err
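
All four helpers patched above (ensureFuncConfigMap, ensureFuncService, ensureFuncDeployment, ensureFuncJob) share one upsert shape: try Create, and if Kubernetes reports AlreadyExists, marshal the desired object and send a strategic-merge Patch. A condensed sketch of that control flow follows; create, patch, and isAlreadyExists are stand-in callbacks for the concrete client-go calls (Services(ns).Create, Services(ns).Patch, k8sErrors.IsAlreadyExists), not real API:

package main

import (
	"encoding/json"
	"fmt"
)

// ensureObject condenses the upsert flow shared by the ensure* helpers:
// try Create first and, when the object already exists, fall back to a
// strategic-merge Patch built from the JSON-marshaled desired state.
func ensureObject(
	obj interface{},
	create func() error,
	patch func(data []byte) error,
	isAlreadyExists func(error) bool,
) error {
	err := create()
	if err != nil && isAlreadyExists(err) {
		var data []byte
		data, err = json.Marshal(obj) // note "=", keeping the outer err
		if err != nil {
			return err
		}
		err = patch(data)
	}
	return err
}

func main() {
	err := ensureObject(
		map[string]string{"kind": "Service"},
		func() error { return fmt.Errorf("already exists") },
		func([]byte) error { return nil },
		func(error) bool { return true },
	)
	fmt.Println(err) // <nil>: the patch succeeded, so no stale error leaks
}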
18 changes: 18 additions & 0 deletions script/libtest.bash
@@ -50,6 +50,16 @@ k8s_wait_for_pod_ready() {
         sleep 1
     done
 }
+k8s_wait_for_uniq_pod() {
+    echo_info "Waiting for pod '${@}' to be the only one running ... "
+    local -i cnt=${TEST_MAX_WAIT_SEC:?}
+    until [[ $(kubectl get pod "${@}" -ogo-template='{{.items|len}}') == 1 ]]; do
+        ((cnt=cnt-1)) || return 1
+        sleep 1
+    done
+    k8s_wait_for_pod_ready "${@}"
+    echo "Finished waiting"
+}
 k8s_wait_for_pod_gone() {
     echo_info "Waiting for pod '${@}' to be gone ... "
     local -i cnt=${TEST_MAX_WAIT_SEC:?}
@@ -243,4 +253,12 @@ test_kubeless_function() {
     esac
     make -sC examples ${func}-verify
 }
+test_kubeless_function_update() {
+    local func=${1:?} func_topic
+    echo_info "UPDATE: $func"
+    make -sC examples ${func}-update
+    k8s_wait_for_uniq_pod -l function=${func}
+    make -sC examples ${func}-update-verify
+}
# vim: sw=4 ts=4 et si
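
The new k8s_wait_for_uniq_pod above is a bounded poll: once per second it asks kubectl how many pods match the selector, gives up after TEST_MAX_WAIT_SEC attempts, and only then delegates to k8s_wait_for_pod_ready. For readers more at home in Go, the same shape as a hedged sketch (waitUntil is an illustrative name, not part of any library):

package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond once per second until it returns true or the
// budget of maxWaitSec attempts is exhausted, the same loop shape as
// k8s_wait_for_uniq_pod's until/((cnt=cnt-1))/sleep construction.
func waitUntil(maxWaitSec int, cond func() bool) error {
	for cnt := maxWaitSec; cnt > 0; cnt-- {
		if cond() {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("condition not met after %ds", maxWaitSec)
}

func main() {
	calls := 0
	err := waitUntil(5, func() bool { calls++; return calls == 3 })
	fmt.Println(err) // <nil> after three polls
}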
1 change: 1 addition & 0 deletions tests/integration-tests.bats
@@ -37,6 +37,7 @@ load ../script/libtest
 # 'bats' lacks loop support, unroll-them-all ->
 @test "Test function: get-python" {
   test_kubeless_function get-python
+  test_kubeless_function_update get-python
 }
 @test "Test function: get-nodejs" {
   test_kubeless_function get-nodejs
