9 Jenkins Hacks That Will Make Your Life Easier
Practical Jenkins tips from real infrastructure work. No fluff, just stuff that actually helps.
Hack #1: Use Docker Containers Instead of Plugins
Stop installing plugins for every tool. Jenkins plugins get outdated, have security issues, and create dependency hell. Instead, use Docker containers.
Bad approach - Installing AWS CLI plugin, Ansible plugin, Terraform plugin…
Good approach - Just use the official Docker images:
// Declarative pipeline that runs each stage's tooling from official Docker
// images instead of Jenkins plugins. Needs the Docker Pipeline plugin and a
// node labelled 'docker' with a working Docker daemon.
pipeline {
    agent { label 'docker' }
    stages {
        stage('AWS Operations') {
            agent {
                docker {
                    image 'amazon/aws-cli:2.15.0'
                    // Run as root and clear the image's default entrypoint so
                    // Jenkins can exec its own shell inside the container.
                    args '-u root --entrypoint=""'
                    // Reuse the top-level node's workspace instead of
                    // allocating a fresh one for this stage.
                    reuseNode true
                }
            }
            steps {
                // AWS credentials come from the host (instance profile /
                // injected env), not from the container image.
                sh 'aws s3 ls'
                sh 'aws ec2 describe-instances --query "Reservations[].Instances[].InstanceId"'
            }
        }
        stage('Run Ansible') {
            agent {
                docker {
                    image 'willhallonline/ansible:2.16-alpine-3.18'
                    reuseNode true
                }
            }
            steps {
                // Run from the checked-out ansible/ directory so relative
                // inventory/playbook paths resolve.
                dir('ansible') {
                    sh 'ansible-playbook -i inventory site.yml'
                }
            }
        }
        stage('Packer Build') {
            agent {
                docker {
                    image 'hashicorp/packer:1.10'
                    // Clear the entrypoint for the same reason as above.
                    args '--entrypoint=""'
                    reuseNode true
                }
            }
            steps {
                dir('packer') {
                    sh 'packer init .'
                    sh 'packer build template.pkr.hcl'
                }
            }
        }
    }
}
Benefits:
- Any tool version on demand — pinned explicitly by image tag
- No plugin compatibility issues
- Same container works locally and in CI
- Easy to update - just change the image tag
Hack #2: IAM Role Sessions for Cross-Account Access
Never use static AWS credentials. Use IAM instance profiles on your Jenkins server, then assume roles for cross-account access.
// Cross-account deploys: same role name in every account, different account id.
// NOTE(review): @Field requires `import groovy.transform.Field` at the top of
// the Jenkinsfile (not shown in this snippet).
@Field String AWS_ACCOUNT_PROD = '111111111111'
@Field String AWS_ACCOUNT_DEV = '222222222222'
pipeline {
    agent { label 'aws-enabled' }
    stages {
        stage('Deploy to Dev') {
            steps {
                script {
                    // withAWSRole is the helper defined below (shared library).
                    withAWSRole(AWS_ACCOUNT_DEV, 'jenkins-deploy-role') {
                        sh 'aws s3 sync ./dist s3://dev-bucket/'
                    }
                }
            }
        }
        stage('Deploy to Prod') {
            steps {
                script {
                    withAWSRole(AWS_ACCOUNT_PROD, 'jenkins-deploy-role') {
                        sh 'aws s3 sync ./dist s3://prod-bucket/'
                    }
                }
            }
        }
    }
}
// Reusable role assumption - put this in shared library.
// Assumes role/roleName in the given account via STS (using the agent's
// instance-profile credentials), then runs `body` with the temporary
// credentials exported as environment variables.
//   accountId       - 12-digit target AWS account id
//   roleName        - IAM role to assume in that account
//   durationSeconds - session lifetime; optional with a default of 3600 so
//                     existing two-arg + trailing-closure callers still work
// NOTE(review): withEnv does NOT mask these values in the console log —
// avoid `set -x` or echoing them inside `body`.
def withAWSRole(String accountId, String roleName, int durationSeconds = 3600, Closure body) {
    def roleArn = "arn:aws:iam::${accountId}:role/${roleName}"
    // BUILD_NUMBER alone can collide across jobs; include JOB_NAME if you
    // need globally unique session names for auditing.
    def sessionName = "jenkins-${env.BUILD_NUMBER}"
    // Get temporary credentials
    def creds = sh(
        script: """
            aws sts assume-role \
                --role-arn ${roleArn} \
                --role-session-name ${sessionName} \
                --duration-seconds ${durationSeconds} \
                --output json
        """,
        returnStdout: true
    ).trim()
    def parsed = readJSON(text: creds)
    withEnv([
        "AWS_ACCESS_KEY_ID=${parsed.Credentials.AccessKeyId}",
        "AWS_SECRET_ACCESS_KEY=${parsed.Credentials.SecretAccessKey}",
        "AWS_SESSION_TOKEN=${parsed.Credentials.SessionToken}"
    ]) {
        body()
    }
}
Your Jenkins EC2 instance needs an instance profile with sts:AssumeRole permission. The target accounts need a role that trusts your Jenkins account.
Hack #3: Secrets Manager Instead of Jenkins Credentials
Jenkins credentials store is fine for small setups, but Secrets Manager is better for teams:
// Fetch secrets from AWS Secrets Manager at runtime instead of storing them
// in the Jenkins credentials store.
pipeline {
    agent { label 'linux' }
    stages {
        stage('Get Secrets') {
            steps {
                script {
                    // Fetch secrets at runtime (getSecret is defined below)
                    def dbCreds = getSecret('prod/database/credentials')
                    def apiKey = getSecret('prod/api/key')
                    // NOTE(review): unlike withCredentials, withEnv does NOT
                    // mask these values in the console log — do not echo them
                    // and avoid `set -x` inside deploy.sh.
                    withEnv([
                        "DB_USER=${dbCreds.username}",
                        "DB_PASS=${dbCreds.password}",
                        "API_KEY=${apiKey.key}"
                    ]) {
                        sh './deploy.sh'
                    }
                }
            }
        }
    }
}
// Fetch a secret from AWS Secrets Manager and parse its JSON SecretString.
// NOTE: deliberately NOT @NonCPS — sh() and readJSON() are pipeline steps,
// and pipeline steps cannot be called from inside a @NonCPS method (doing so
// fails at runtime). readJSON already returns a serialization-safe map.
def getSecret(String secretName) {
    // secretName is interpolated into a shell command: only pass trusted,
    // pipeline-controlled names here (never user input).
    def cmd = "aws secretsmanager get-secret-value --secret-id ${secretName} --query SecretString --output text"
    def result = sh(script: cmd, returnStdout: true).trim()
    return readJSON(text: result)
}
A caveat on @NonCPS here: `sh` and `readJSON` are pipeline steps, and pipeline steps must not be called from inside a @NonCPS method — so drop the annotation on `getSecret`. Reserve @NonCPS for pure Groovy work whose intermediate objects (such as JsonSlurper's lazy maps) cannot survive Jenkins' CPS serialization.
Hack #4: Packer + Ansible AMI Pipeline
Real-world AMI build pipeline with proper error handling:
// AMI bake pipeline: validate -> Packer build -> smoke-test a real instance
// -> (prod only) roll out via launch template version + ASG instance refresh.
// NOTE(review): @Field requires `import groovy.transform.Field` at the top
// of the Jenkinsfile (not shown in this snippet).
@Field String AMI_NAME_PREFIX = 'app-server'
// NOTE(review): AWS_REGION is declared but never referenced below — the CLI
// calls rely on the agent's default region. Confirm intent.
@Field String AWS_REGION = 'us-west-2'
pipeline {
    agent { label 'packer-builder' }
    parameters {
        choice(name: 'ENVIRONMENT', choices: ['dev', 'staging', 'prod'], description: 'Target environment')
        string(name: 'APP_VERSION', defaultValue: 'latest', description: 'Application version to bake')
    }
    environment {
        PACKER_LOG = '1'              // verbose Packer logging
        ANSIBLE_FORCE_COLOR = 'true'  // keep colored Ansible output in console
    }
    stages {
        stage('Validate') {
            steps {
                dir('packer') {
                    sh 'packer init .'
                    // Single-quoted Groovy string: ${ENVIRONMENT} is expanded
                    // by the shell from the parameter-injected environment.
                    sh 'packer validate -var-file=vars/${ENVIRONMENT}.pkrvars.hcl template.pkr.hcl'
                }
                dir('ansible') {
                    sh 'ansible-lint site.yml'
                }
            }
        }
        stage('Build AMI') {
            steps {
                dir('packer') {
                    script {
                        // tee keeps the full log in packer-output.txt for the
                        // AMI-id grep below.
                        def output = sh(
                            script: """
                                packer build \
                                    -var-file=vars/${ENVIRONMENT}.pkrvars.hcl \
                                    -var 'app_version=${APP_VERSION}' \
                                    -machine-readable \
                                    template.pkr.hcl | tee packer-output.txt
                            """,
                            returnStdout: true
                        )
                        // Extract AMI ID from output. Machine-readable artifact
                        // lines end in "<region>:<ami-id>", so cut on ':' keeps
                        // the ami-id half.
                        def amiLine = sh(
                            script: "grep 'artifact,0,id' packer-output.txt | tail -1 | cut -d: -f2",
                            returnStdout: true
                        ).trim()
                        // Stored in env so later stages and post{} can read it.
                        env.NEW_AMI_ID = amiLine
                        echo "Built AMI: ${env.NEW_AMI_ID}"
                    }
                }
            }
        }
        stage('Test AMI') {
            steps {
                script {
                    // Launch a throwaway test instance from the new AMI.
                    // NOTE(review): subnet-xxxxx / sg-xxxxx are placeholders.
                    def instanceId = sh(
                        script: """
                            aws ec2 run-instances \
                                --image-id ${NEW_AMI_ID} \
                                --instance-type t3.micro \
                                --subnet-id subnet-xxxxx \
                                --security-group-ids sg-xxxxx \
                                --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=ami-test-${BUILD_NUMBER}}]' \
                                --query 'Instances[0].InstanceId' \
                                --output text
                        """,
                        returnStdout: true
                    ).trim()
                    env.TEST_INSTANCE_ID = instanceId
                    // Wait for instance and run tests
                    sh "aws ec2 wait instance-running --instance-ids ${instanceId}"
                    sleep(60) // pipeline sleep() is in seconds; crude wait for services to start
                    // Your smoke tests here
                    sh "./scripts/smoke-test.sh ${instanceId}"
                }
            }
            post {
                always {
                    // Best-effort cleanup of the test instance; `|| true`
                    // keeps a cleanup failure from masking the build result.
                    sh "aws ec2 terminate-instances --instance-ids ${TEST_INSTANCE_ID} || true"
                }
            }
        }
        stage('Update ASG') {
            when {
                expression { params.ENVIRONMENT == 'prod' }
            }
            steps {
                script {
                    // Create new launch template version. '\$Latest' is
                    // escaped so the literal token $Latest reaches the AWS
                    // CLI instead of being Groovy-interpolated.
                    sh """
                        aws ec2 create-launch-template-version \
                            --launch-template-name ${AMI_NAME_PREFIX}-lt \
                            --source-version '\$Latest' \
                            --launch-template-data '{"ImageId":"${NEW_AMI_ID}"}'
                    """
                    // Trigger ASG refresh: rolling replacement that keeps at
                    // least 75% of capacity in service.
                    sh """
                        aws autoscaling start-instance-refresh \
                            --auto-scaling-group-name ${AMI_NAME_PREFIX}-asg \
                            --preferences '{"MinHealthyPercentage": 75}'
                    """
                }
            }
        }
    }
    post {
        success {
            script {
                // sendSESNotification is defined elsewhere (see the SES hack).
                sendSESNotification('SUCCESS', "AMI ${NEW_AMI_ID} built and deployed to ${ENVIRONMENT}")
            }
        }
        failure {
            script {
                sendSESNotification('FAILED', "AMI build failed for ${ENVIRONMENT}")
            }
        }
    }
}
Packer template (template.pkr.hcl):
# Packer HCL2 template: Ubuntu 22.04 base image -> Ansible-provisioned app AMI.
packer {
  required_plugins {
    amazon = {
      version = ">= 1.3.0"
      source = "github.com/hashicorp/amazon"
    }
    ansible = {
      version = ">= 1.1.0"
      source = "github.com/hashicorp/ansible"
    }
  }
}

# Passed from the pipeline via -var-file / -var.
variable "environment" {
  type = string
}
variable "app_version" {
  type = string
  default = "latest"
}

source "amazon-ebs" "main" {
  # NOTE(review): {{timestamp}} is the legacy (pre-HCL2) template function;
  # many Packer versions still honor it in ami_name, but the HCL2 idiom is a
  # local built from timestamp()/formatdate(). Confirm it works on yours.
  ami_name = "app-server-${var.environment}-{{timestamp}}"
  instance_type = "t3.medium"
  region = "us-west-2"
  # Always build from the newest Canonical Jammy AMI.
  source_ami_filter {
    filters = {
      name = "ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"
      root-device-type = "ebs"
      virtualization-type = "hvm"
    }
    owners = ["099720109477"] # Canonical
    most_recent = true
  }
  ssh_username = "ubuntu"
  tags = {
    Name = "app-server-${var.environment}"
    Environment = var.environment
    AppVersion = var.app_version
    BuildTime = timestamp()
  }
}

build {
  sources = ["source.amazon-ebs.main"]
  provisioner "ansible" {
    playbook_file = "../ansible/site.yml"
    # NOTE(review): `environment` is a reserved keyword inside Ansible
    # playbooks (per-task environment); an extra var with this name can
    # clash — consider renaming it (e.g. deploy_env) on both sides.
    extra_arguments = [
      "-e", "app_version=${var.app_version}",
      "-e", "environment=${var.environment}"
    ]
  }
}
Hack #5: EC2 Fleet Plugin for Dynamic Agents
Don’t run Jenkins agents 24/7 — spin them up on demand. Note that the `amazonEC2` configuration below is the Amazon EC2 plugin's schema; the EC2 Fleet plugin configures an `eC2Fleet` cloud instead, with similar settings:
Jenkins Configuration as Code (jenkins.yaml):
# Jenkins Configuration as Code: on-demand EC2 build agents.
# NOTE(review): this `amazonEC2` cloud is the Amazon EC2 plugin's schema; the
# EC2 Fleet plugin configures an `eC2Fleet` cloud instead — confirm which
# plugin you actually run.
jenkins:
  clouds:
    - amazonEC2:
        name: "ec2-fleet"
        region: "us-west-2"
        # AWS credentials come from the controller's instance profile —
        # no static keys stored in Jenkins.
        useInstanceProfileForCredentials: true
        templates:
          - ami: "ami-xxxxxxxxx"    # placeholder: your agent AMI
            amiType:
              unixData:
                sshPort: "22"
            associatePublicIp: false
            connectBySSHProcess: false
            connectionStrategy: PRIVATE_IP   # agents live in a private subnet
            description: "Linux Docker Agent"
            ebsEncryptRootVolume: DEFAULT
            ebsOptimized: true
            # NOTE(review): OFF skips SSH host-key verification — convenient,
            # but weaker against MITM; consider a stricter strategy.
            hostKeyVerificationStrategy: OFF
            iamInstanceProfile: "jenkins-agent-role"
            idleTerminationMinutes: "30"     # kill idle agents after 30 mins
            instanceCapStr: "10"             # max concurrent agents
            labelString: "linux docker packer-builder"
            launchTimeoutStr: "300"
            maxTotalUses: -1                 # unlimited builds per agent
            minimumNumberOfInstances: 0
            minimumNumberOfSpareInstances: 0
            mode: EXCLUSIVE                  # only run jobs with matching labels
            monitoring: true
            numExecutors: 2
            remoteAdmin: "ubuntu"
            securityGroups: "sg-xxxxxxxxx"
            stopOnTerminate: false
            subnetId: "subnet-xxxxxxxxx"
            t2Unlimited: false
            tags:
              - name: "Name"
                value: "jenkins-agent"
            type: T3Medium
            useEphemeralDevices: false
Key settings:
- useInstanceProfileForCredentials: true — no static AWS creds
- idleTerminationMinutes: "30" — kill idle agents after 30 mins
- connectionStrategy: PRIVATE_IP — agents in private subnet
- mode: EXCLUSIVE — only run jobs with matching labels
Hack #6: Kubernetes Pod Templates
For teams running Kubernetes, define pod templates in your Jenkinsfile:
// Kubernetes agent: one pod per build with a dedicated container per tool.
pipeline {
    agent {
        kubernetes {
            yaml '''
apiVersion: v1
kind: Pod
metadata:
  labels:
    jenkins: agent
spec:
  serviceAccountName: jenkins-agent
  containers:
  - name: golang
    image: golang:1.22-alpine
    command: ['sleep', 'infinity']
    resources:
      requests:
        memory: "512Mi"
        cpu: "500m"
      limits:
        memory: "1Gi"
        cpu: "1"
  # Docker-in-Docker: the docker CLI in this same container talks to the
  # daemon over its own /var/run/docker.sock. (The original mounted an
  # emptyDir AT /var/run/docker.sock, which puts a directory where the
  # daemon needs to create its socket — removed.)
  - name: docker
    image: docker:24-dind
    securityContext:
      privileged: true
  - name: aws
    image: amazon/aws-cli:2.15.0
    command: ['sleep', 'infinity']
'''
        }
    }
    stages {
        stage('Build Go App') {
            steps {
                container('golang') {
                    sh '''
                        go mod download
                        go build -o app ./cmd/server
                        go test ./...
                    '''
                }
            }
        }
        stage('Build Docker Image') {
            steps {
                container('docker') {
                    // Tag with the full registry path up front so the push
                    // refers to an image that exists (the original built
                    // `myapp:N` but pushed the never-tagged
                    // `myregistry/myapp:N`). A `docker login` is still
                    // required before pushing to a private registry.
                    sh '''
                        docker build -t myregistry/myapp:${BUILD_NUMBER} .
                        docker push myregistry/myapp:${BUILD_NUMBER}
                    '''
                }
            }
        }
        stage('Deploy') {
            steps {
                container('aws') {
                    // NOTE(review): amazon/aws-cli does not bundle kubectl —
                    // bake it into a custom image or add a kubectl container.
                    sh 'aws eks update-kubeconfig --name my-cluster'
                    sh 'kubectl set image deployment/myapp myapp=myregistry/myapp:${BUILD_NUMBER}'
                }
            }
        }
    }
}
Hack #7: @Field and @NonCPS Annotations
These two annotations solve real problems:
@Field - Share Variables Across Stages
Without @Field, variables defined outside pipeline block aren’t accessible in all contexts:
import groovy.transform.Field

// @Field hoists these into script-class fields so every stage (and any
// function defined in the Jenkinsfile) can read them.
@Field String APP_NAME = 'my-service'
@Field Map ENVIRONMENTS = [
    dev: [account: '111111111111', region: 'us-west-2'],
    staging: [account: '222222222222', region: 'us-west-2'],
    prod: [account: '333333333333', region: 'us-east-1']
]
pipeline {
    agent { label 'linux' }
    stages {
        stage('Deploy') {
            steps {
                script {
                    // Named `target`, NOT `env`: a local called env (as in
                    // the original) shadows Jenkins' global environment
                    // object inside this script block.
                    def target = ENVIRONMENTS[params.ENVIRONMENT]
                    echo "Deploying ${APP_NAME} to account ${target.account}"
                }
            }
        }
    }
}
@NonCPS - For Non-Serializable Operations
Jenkins saves pipeline state for resume-after-restart. Some operations can’t be serialized. Use @NonCPS:
import groovy.transform.Field
import groovy.json.JsonSlurper
// Parse a JSON string outside the CPS transform: JsonSlurper's lazy maps are
// not Serializable, so they must stay confined to a @NonCPS method.
@NonCPS
def parseJson(String json) {
    def slurper = new JsonSlurper()
    def parsed = slurper.parseText(json)
    return parsed
}
// Return the items ordered by their 'date' field (ascending, yyyy-MM-dd).
// sort(false) sorts a COPY: Groovy's one-arg sort{} mutates the receiver in
// place, so the original silently reordered the caller's list as a side
// effect. The return value is unchanged.
@NonCPS
def sortByDate(List items) {
    return items.sort(false) { a, b ->
        Date.parse('yyyy-MM-dd', a.date) <=> Date.parse('yyyy-MM-dd', b.date)
    }
}
// List local git tags matching a glob pattern (empty strings filtered out).
// NOTE: deliberately NOT @NonCPS — sh() is a pipeline step, and pipeline
// steps cannot be called from inside a @NonCPS method (fails at runtime).
// The String[] result serializes fine, so no annotation is needed here.
def getMatchingTags(String pattern) {
    def tags = sh(script: "git tag -l '${pattern}'", returnStdout: true).trim()
    return tags.split('\n').findAll { it }
}
pipeline {
    agent { label 'linux' }
    stages {
        stage('Process') {
            steps {
                script {
                    // The raw JSON string is CPS-safe; the non-serializable
                    // parsing/sorting happens inside the @NonCPS helpers.
                    def response = sh(script: 'curl -s https://api.example.com/data', returnStdout: true)
                    def data = parseJson(response)
                    def sorted = sortByDate(data.items)
                    sorted.each { item ->
                        echo "Processing: ${item.name}"
                    }
                }
            }
        }
    }
}
Common @NonCPS use cases:
- JSON parsing with JsonSlurper
- Sorting collections
- Regex operations
- Date parsing
- Any Groovy closure that doesn’t serialize
Hack #8: SES Notifications (Replace Email Plugin)
The email-ext plugin is clunky. Use SES directly:
// Send a plain-text build notification via AWS SES using the CLI — no
// email-ext plugin needed.
//   status  - e.g. 'SUCCESS' / 'FAILED'
//   message - free-form detail line
// Subject and body are handed to the shell through environment variables and
// expanded with double quotes: the original interpolated them inside single
// quotes, which breaks (and is injectable) as soon as a job name or message
// contains an apostrophe.
def sendSESNotification(String status, String message) {
    def subject = "[Jenkins] ${env.JOB_NAME} #${env.BUILD_NUMBER} - ${status}"
    def body = """
    Build: ${env.JOB_NAME} #${env.BUILD_NUMBER}
    Status: ${status}
    Message: ${message}
    Console: ${env.BUILD_URL}console
    """.stripIndent()
    withEnv(["SES_SUBJECT=${subject}", "SES_BODY=${body}"]) {
        sh '''
            aws ses send-email \
                --from 'jenkins@yourdomain.com' \
                --to 'team@yourdomain.com' \
                --subject "$SES_SUBJECT" \
                --text "$SES_BODY" \
                --region us-west-2
        '''
    }
}
// For HTML emails with more detail. The HTML body goes through a file
// (file:// reference), so only the subject needs shell-safe handling — it is
// passed via an environment variable instead of being interpolated inside
// single quotes (which would break on apostrophes in job names).
def sendDetailedSESNotification(String status, Map details) {
    def color = status == 'SUCCESS' ? '#36a64f' : '#dc3545'
    def html = """
    <!DOCTYPE html>
    <html>
    <body style="font-family: Arial, sans-serif;">
    <div style="border-left: 4px solid ${color}; padding-left: 12px;">
    <h2>${env.JOB_NAME} #${env.BUILD_NUMBER}</h2>
    <p><strong>Status:</strong> ${status}</p>
    <p><strong>Duration:</strong> ${currentBuild.durationString}</p>
    <p><strong>Triggered by:</strong> ${details.triggeredBy ?: 'Unknown'}</p>
    ${details.amiId ? "<p><strong>AMI ID:</strong> ${details.amiId}</p>" : ''}
    <p><a href="${env.BUILD_URL}">View Build</a></p>
    </div>
    </body>
    </html>
    """
    writeFile file: 'email.html', text: html
    withEnv(["SES_SUBJECT=[Jenkins] ${env.JOB_NAME} - ${status}"]) {
        sh '''
            aws ses send-email \
                --from 'jenkins@yourdomain.com' \
                --to 'team@yourdomain.com' \
                --subject "$SES_SUBJECT" \
                --html file://email.html \
                --region us-west-2
        '''
    }
}
Hack #9: Practical Pipeline Patterns
Pattern 1: dir() for Clean Directory Management
stage('Multi-Component Build') {
    steps {
        // dir() creates its OWN directory if missing, but not arbitrary
        // output paths: without this, `go build -o ../dist/backend` and
        // `cp ... ../dist/static/` both fail on a missing directory.
        sh 'mkdir -p dist/static'
        dir('backend') {
            git branch: 'main', url: 'https://github.com/org/backend.git'
            sh 'go build -o ../dist/backend ./cmd/server'
        }
        dir('frontend') {
            git branch: 'main', url: 'https://github.com/org/frontend.git'
            sh 'npm ci && npm run build'
            sh 'cp -r dist/* ../dist/static/'
        }
        dir('dist') {
            // Bundle everything collected above into one versioned tarball.
            sh 'tar -czf app-${BUILD_NUMBER}.tar.gz *'
            archiveArtifacts artifacts: '*.tar.gz'
        }
    }
}
Pattern 2: Retry with Exponential Backoff
// Retry `body` up to maxAttempts times with doubling delays (10s, 20s, 40s…).
// Returns body()'s value on the first success; rethrows the final exception
// once attempts are exhausted.
// NOTE(review): catching Exception also retries FlowInterruptedException, so
// a user-aborted build may be retried instead of stopping — consider
// rethrowing interruptions immediately.
def retryWithBackoff(int maxAttempts, Closure body) {
    def attempt = 0
    def delay = 10
    while (attempt < maxAttempts) {
        attempt++
        try {
            return body()
        } catch (Exception e) {
            if (attempt >= maxAttempts) {
                throw e
            }
            echo "Attempt ${attempt} failed: ${e.message}. Retrying in ${delay}s..."
            sleep(delay)          // pipeline sleep(): seconds
            delay = delay * 2 // Exponential backoff
        }
    }
}
// Usage
stage('Deploy') {
    steps {
        script {
            retryWithBackoff(3) {
                sh './deploy.sh'
            }
        }
    }
}
Pattern 3: Parallel with Controlled Concurrency
stage('Deploy All Environments') {
    steps {
        script {
            def environments = ['dev', 'staging', 'qa']
            def parallelStages = [:]
            // Closure parameter is envName, NOT env: naming it `env` (as the
            // original did) shadows Jenkins' global environment object inside
            // the closure.
            environments.each { envName ->
                parallelStages["Deploy ${envName}"] = {
                    stage("Deploy ${envName}") {
                        lock("deploy-${envName}") { // Prevent concurrent deploys to same env
                            sh "./deploy.sh ${envName}"
                        }
                    }
                }
            }
            parallel parallelStages
        }
    }
}
Pattern 4: Stash/Unstash Across Nodes
// Build once, then fan the artifact out to heterogeneous test nodes.
// (Fragment: these stages belong inside a pipeline's `stages` block.)
stage('Build') {
    agent { label 'build-node' }
    steps {
        sh 'go build -o app .'
        // stash copies the matched files to the controller so another node
        // can unstash them later; keep stashes small (binaries, archives).
        stash includes: 'app', name: 'binary'
    }
}
stage('Test on Multiple OS') {
    parallel {
        stage('Test Ubuntu') {
            agent { label 'ubuntu' }
            steps {
                unstash 'binary'
                sh './app --self-test'
            }
        }
        stage('Test Amazon Linux') {
            agent { label 'amazon-linux' }
            steps {
                unstash 'binary'
                sh './app --self-test'
            }
        }
    }
}
Pattern 5: Matrix Builds
// Declarative matrix: runs the inner Test stage once per GO_VERSION x OS
// combination (4 cells here), each in its own docker agent.
stage('Test Matrix') {
    matrix {
        axes {
            axis {
                name 'GO_VERSION'
                values '1.21', '1.22'
            }
            axis {
                name 'OS'
                values 'linux', 'darwin'
            }
        }
        stages {
            stage('Test') {
                agent {
                    docker {
                        // Axis values are exposed as environment variables,
                        // resolvable here via Groovy interpolation.
                        image "golang:${GO_VERSION}"
                    }
                }
                steps {
                    // NOTE(review): with GOOS=darwin on a Linux agent,
                    // `go test` cross-compiles but cannot execute the test
                    // binary — expect those cells to fail. Use go build/vet
                    // for non-native targets, or add an excludes block.
                    sh "GOOS=${OS} go test ./..."
                }
            }
        }
    }
}
Bonus: JNLP4 Security
Only enable JNLP4-connect protocol. JNLP, JNLP2, JNLP3 have known vulnerabilities.
In jenkins.yaml (Configuration as Code):
# JCasC: accept only the JNLP4-connect agent protocol (TLS-encrypted and
# mutually authenticated); older JNLP variants have known vulnerabilities.
jenkins:
  slaveAgentPort: 50000       # fixed inbound agent port (firewall-friendly)
  agentProtocols:
    - "JNLP4-connect" # Only this one - encrypted and authenticated
security:
  masterKillSwitch: false # Enable agent-to-controller security
Or via Groovy init script:
// Groovy init script: restrict inbound agents to JNLP4-connect only.
import jenkins.model.Jenkins

// Jenkins.get() is the recommended accessor — getInstance() is deprecated
// and may return null; get() fails fast if Jenkins isn't initialized yet.
def jenkins = Jenkins.get()
Set<String> protocols = new HashSet<>()
protocols.add('JNLP4-connect')
jenkins.setAgentProtocols(protocols)
jenkins.save()  // persist the config change to disk
Quick Reference
| What | Old Way | Better Way |
|---|---|---|
| AWS CLI | Install plugin | docker { image 'amazon/aws-cli:2.15.0' } |
| Ansible | Install plugin | docker { image 'willhallonline/ansible:2.16' } |
| Terraform | Install plugin | docker { image 'hashicorp/terraform:1.7' } |
| AWS Creds | Jenkins credentials | IAM Instance Profile + Assume Role |
| Notifications | email-ext plugin | AWS SES |
| Secrets | Jenkins credentials | AWS Secrets Manager |
| JSON parsing | Pipeline step | @NonCPS with JsonSlurper |
| Global vars | Environment block | @Field annotation |
| Agents | Always-on EC2 | EC2 Fleet (auto-scale) |