sap-jenkins-library/vars/dockerExecuteOnKubernetes.groovy
Oliver Nocon 7a961ef38e
seleniumExecuteTests - add step to run Selenium tests (#318)
The commit comes with an extension to dockerExecute and dockerExecuteOnKubernetes to run sidecar containers (see the usage sketch below).

This helps to execute Selenium tests using two Docker images:

1. Execution runtime for the tests (e.g. a Node.js image)
2. Selenium instance which holds the Selenium server + browser

* add documentation & some name cleanup
* include PR feedback
* add step documentation to the structure
2018-10-04 17:06:42 +02:00
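
A minimal usage sketch for this scenario, assuming a Node.js test image next to the selenium/standalone-chrome sidecar (the image names and the test command are illustrative and not defaults of the library):

    dockerExecuteOnKubernetes(
        script: this,
        containerMap: ['node:8-stretch': 'node', 'selenium/standalone-chrome': 'selenium'],
        containerName: 'node',
        containerPortMappings: ['selenium/standalone-chrome': [[name: 'selPort', containerPort: 4444, hostPort: 4444]]]
    ) {
        sh 'npm run test'
    }

Because containerMap is set, the step creates one pod containing both containers; containerName selects the node container for the body, while the Selenium sidecar is reachable on localhost:4444 since containers in a pod share the network namespace.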


import com.sap.piper.ConfigurationHelper
import com.sap.piper.JenkinsUtils
import com.sap.piper.k8s.SystemEnv
import groovy.transform.Field
import hudson.AbortException
@Field def STEP_NAME = 'dockerExecuteOnKubernetes'
@Field def PLUGIN_ID_KUBERNETES = 'kubernetes'
@Field Set GENERAL_CONFIG_KEYS = ['jenkinsKubernetes']
@Field Set PARAMETER_KEYS = [
'containerCommands', //specify start command for containers to overwrite Piper default (`/usr/bin/tail -f /dev/null`). If container's defaultstart command should be used provide empty string like: `['selenium/standalone-chrome': '']`
'containerEnvVars', //specify environment variables per container. If not provided dockerEnvVars will be used
'containerMap', //specify multiple images which then form a kubernetes pod, example: containerMap: ['maven:3.5-jdk-8-alpine': 'mavenexecute','selenium/standalone-chrome': 'selenium']
'containerName', //optional configuration in combination with containerMap to define the container where the commands should be executed in
'containerPortMappings', //map which defines per docker image the port mappings, like containerPortMappings: ['selenium/standalone-chrome': [[name: 'selPort', containerPort: 4444, hostPort: 4444]]]
'containerWorkspaces', //specify workspace (=home directory of user) per container. If not provided dockerWorkspace will be used. If empty, home directory will not be set.
'dockerImage',
'dockerWorkspace',
'dockerEnvVars'
]
@Field Set STEP_CONFIG_KEYS = PARAMETER_KEYS.plus(['stashIncludes', 'stashExcludes'])
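
// Illustrative parameters map (hypothetical image names and values, not part of this step) combining the options above:
//   dockerExecuteOnKubernetes(
//       script: this,
//       containerMap: ['maven:3.5-jdk-8-alpine': 'mavenexecute', 'selenium/standalone-chrome': 'selenium'],
//       containerName: 'mavenexecute',
//       containerCommands: ['selenium/standalone-chrome': ''],   // keep the image's own start command
//       containerEnvVars: ['selenium/standalone-chrome': [NO_PROXY: 'localhost']],
//       containerWorkspaces: ['selenium/standalone-chrome': ''], // do not set HOME in the sidecar
//       containerPortMappings: ['selenium/standalone-chrome': [[name: 'selPort', containerPort: 4444, hostPort: 4444]]]
//   ) {
//       sh 'mvn clean verify'
//   }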

void call(Map parameters = [:], body) {
    handlePipelineStepErrors(stepName: STEP_NAME, stepParameters: parameters) {
        if (!JenkinsUtils.isPluginActive(PLUGIN_ID_KUBERNETES)) {
            error("[ERROR][${STEP_NAME}] not supported. Plugin '${PLUGIN_ID_KUBERNETES}' is not installed or not active.")
        }

        def script = parameters.script
        if (script == null)
            script = [commonPipelineEnvironment: commonPipelineEnvironment]

        ConfigurationHelper configHelper = ConfigurationHelper
            .loadStepDefaults(this)
            .mixinGeneralConfig(script.commonPipelineEnvironment, GENERAL_CONFIG_KEYS)
            .mixinStepConfig(script.commonPipelineEnvironment, STEP_CONFIG_KEYS)
            .mixinStageConfig(script.commonPipelineEnvironment, parameters.stageName ?: env.STAGE_NAME, STEP_CONFIG_KEYS)
            .mixin(parameters, PARAMETER_KEYS)
            .addIfEmpty('uniqueId', UUID.randomUUID().toString())
        Map config = [:]

        if (parameters.containerMap) {
            config = configHelper.use()
            executeOnPodWithCustomContainerList(config: config) { body() }
        } else {
            config = configHelper
                .withMandatoryProperty('dockerImage')
                .use()
            executeOnPodWithSingleContainer(config: config) { body() }
        }
    }
}

void executeOnPodWithCustomContainerList(Map parameters, body) {
    def config = parameters.config
    podTemplate(getOptions(config)) {
        node(config.uniqueId) {
            // allow execution in a dedicated container
            if (config.containerName) {
                container(name: config.containerName) {
                    body()
                }
            } else {
                body()
            }
        }
    }
}

def getOptions(config) {
    return [name      : 'dynamic-agent-' + config.uniqueId,
            label     : config.uniqueId,
            containers: getContainerList(config)]
}

void executeOnPodWithSingleContainer(Map parameters, body) {
    Map containerMap = [:]
    def config = parameters.config
    containerMap[config.get('dockerImage').toString()] = 'container-exec'
    config.containerMap = containerMap

    /*
     * There could be exceptions thrown by
     *  - the podTemplate
     *  - the container method
     *  - the body
     * We use nested exception handling in this case.
     * In the first two cases the workspace has not been modified, so we can stash the existing workspace as 'container' and
     * unstash it in the finally block. In case of an exception thrown by the body, we need to stash the workspace from inside
     * the container in its finally block instead.
     */
    try {
        stashWorkspace(config, 'workspace')
        podTemplate(getOptions(config)) {
            node(config.uniqueId) {
                container(name: 'container-exec') {
                    try {
                        unstashWorkspace(config, 'workspace')
                        body()
                    } finally {
                        stashWorkspace(config, 'container')
                    }
                }
            }
        }
    } catch (e) {
        stashWorkspace(config, 'container')
        throw e
    } finally {
        unstashWorkspace(config, 'container')
    }
}

private void stashWorkspace(config, prefix) {
    try {
        // Every dockerImage used in dockerExecuteOnKubernetes should have user id 1000
        sh "chown -R 1000:1000 ."
        stash(
            name: "${prefix}-${config.uniqueId}",
            includes: config.stashIncludes.workspace,
            excludes: config.stashExcludes.excludes
        )
    } catch (AbortException | IOException e) {
        echo "${e.getMessage()}"
    }
}

private void unstashWorkspace(config, prefix) {
    try {
        unstash "${prefix}-${config.uniqueId}"
    } catch (AbortException | IOException e) {
        echo "${e.getMessage()}"
    }
}

private List getContainerList(config) {
    def result = []
    result.push(containerTemplate(
        name: 'jnlp',
        image: config.jenkinsKubernetes.jnlpAgent
    ))
    config.containerMap.each { imageName, containerName ->
        def templateParameters = [
            name: containerName.toLowerCase(),
            image: imageName,
            alwaysPullImage: true,
            envVars: getContainerEnvs(config, imageName)
        ]
        if (!config.containerCommands?.get(imageName)?.isEmpty()) {
            templateParameters.command = config.containerCommands?.get(imageName) ?: '/usr/bin/tail -f /dev/null'
        }
        if (config.containerPortMappings?.get(imageName)) {
            def ports = []
            def portCounter = 0
            config.containerPortMappings.get(imageName).each { mapping ->
                mapping.name = "${containerName}${portCounter}".toString()
                ports.add(portMapping(mapping))
                portCounter++
            }
            templateParameters.ports = ports
        }
        result.push(containerTemplate(templateParameters))
    }
    return result
}

/**
 * Returns a list of envVar objects consisting of the set
 * environment variables, params (Parameterized Build) and the working directory.
 * (Kubernetes plugin only!)
 * @param config Map with configurations
 */
private List getContainerEnvs(config, imageName) {
    def containerEnv = []
    def dockerEnvVars = config.containerEnvVars?.get(imageName) ?: config.dockerEnvVars ?: [:]
    def dockerWorkspace = config.containerWorkspaces?.get(imageName) != null ? config.containerWorkspaces?.get(imageName) : config.dockerWorkspace ?: ''

    if (dockerEnvVars) {
        for (String k : dockerEnvVars.keySet()) {
            containerEnv << envVar(key: k, value: dockerEnvVars[k].toString())
        }
    }

    if (dockerWorkspace) {
        containerEnv << envVar(key: "HOME", value: dockerWorkspace)
    }

    // Inherit the proxy information from the master to the container
    SystemEnv systemEnv = new SystemEnv()
    for (String env : systemEnv.getEnv().keySet()) {
        containerEnv << envVar(key: env, value: systemEnv.get(env))
    }

    // ContainerEnv array can't be empty. Using a stub to avoid failure.
    if (!containerEnv) {
        containerEnv << envVar(key: "EMPTY_VAR", value: " ")
    }

    return containerEnv
}
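
// Illustration with assumed sample values (not part of the step): given
//   config.containerEnvVars    = ['node:8-stretch': [NO_PROXY: 'localhost']]
//   config.containerWorkspaces = ['node:8-stretch': '/home/node']
// a call to getContainerEnvs(config, 'node:8-stretch') returns roughly
//   [envVar(key: 'NO_PROXY', value: 'localhost'),
//    envVar(key: 'HOME', value: '/home/node'),
//    /* plus proxy variables inherited from the Jenkins master via SystemEnv */]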