Code Display Demo
This page demonstrates the enhanced code display capabilities in our documentation.
GitHub Actions Integration Examples
Below is an example of a GitHub Actions workflow for setting up and scanning containers:
# GitHub Actions workflow: provision a minikube cluster, create a scan target,
# grant least-privilege RBAC access, and run a CINC Auditor scan against a
# single container via the train-k8s-container transport.
name: Setup Minikube and Run CINC Auditor Scan

on:
  workflow_dispatch:
    inputs:
      minikube_version:
        description: 'Minikube version to use'
        required: true
        default: 'v1.32.0'
      kubernetes_version:
        description: 'Kubernetes version to use'
        required: true
        default: 'v1.28.3'
      cinc_profile:
        description: 'CINC Auditor profile to run'
        required: true
        default: 'dev-sec/linux-baseline'
      threshold:
        description: 'Minimum passing score (0-100)'
        required: true
        default: '70'

jobs:
  setup-and-scan:
    name: Setup minikube and run CINC Auditor scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup minikube
        id: minikube
        uses: medyagh/setup-minikube@master
        with:
          minikube-version: ${{ github.event.inputs.minikube_version }}
          kubernetes-version: ${{ github.event.inputs.kubernetes_version }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          driver: docker
          start-args: --nodes=2

      - name: Get cluster status
        run: |
          kubectl get nodes
          minikube status

      - name: Set up CINC Auditor environment
        run: |
          # Install CINC Auditor
          curl -L https://omnitruck.cinc.sh/install.sh | sudo bash -s -- -P cinc-auditor
          # Install train-k8s-container plugin
          cinc-auditor plugin install train-k8s-container
          # Install SAF-CLI for result processing
          npm install -g @mitre/saf
          # Verify installation
          cinc-auditor --version
          cinc-auditor plugin list
          saf --version

      - name: Create namespace and test pod
        run: |
          # Create namespace
          kubectl create namespace inspec-test
          # Create test pod
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: inspec-target
            namespace: inspec-test
            labels:
              app: inspec-target
              scan-target: "true"
          spec:
            containers:
            - name: busybox
              image: busybox:latest
              command: ["sleep", "infinity"]
          EOF
          # Wait for pod to be running
          kubectl wait --for=condition=ready pod/inspec-target -n inspec-test --timeout=120s
          # Verify pod is running
          kubectl get pods -n inspec-test

      - name: Set up RBAC configuration
        run: |
          # Create service account
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: inspec-scanner
            namespace: inspec-test
          EOF
          # Create role scoped to the single target pod (get/list pods,
          # exec and logs only on "inspec-target")
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          metadata:
            name: inspec-container-role
            namespace: inspec-test
          rules:
          - apiGroups: [""]
            resources: ["pods"]
            verbs: ["get", "list"]
          - apiGroups: [""]
            resources: ["pods/exec"]
            verbs: ["create"]
            resourceNames: ["inspec-target"]
          - apiGroups: [""]
            resources: ["pods/log"]
            verbs: ["get"]
            resourceNames: ["inspec-target"]
          EOF
          # Create role binding
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          metadata:
            name: inspec-container-rolebinding
            namespace: inspec-test
          subjects:
          - kind: ServiceAccount
            name: inspec-scanner
            namespace: inspec-test
          roleRef:
            kind: Role
            name: inspec-container-role
            apiGroup: rbac.authorization.k8s.io
          EOF
          # Verify RBAC setup
          kubectl get serviceaccount,role,rolebinding -n inspec-test

      - name: Generate restricted kubeconfig
        run: |
          # Get a short-lived token for the scanner service account
          TOKEN=$(kubectl create token inspec-scanner -n inspec-test --duration=15m)
          # Get cluster information
          SERVER=$(kubectl config view --minify --output=jsonpath='{.clusters[0].cluster.server}')
          CA_DATA=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
          # Create kubeconfig that carries only the restricted token
          cat > restricted-kubeconfig.yaml << EOF
          apiVersion: v1
          kind: Config
          preferences: {}
          clusters:
          - cluster:
              server: ${SERVER}
              certificate-authority-data: ${CA_DATA}
            name: scanner-cluster
          contexts:
          - context:
              cluster: scanner-cluster
              namespace: inspec-test
              user: scanner-user
            name: scanner-context
          current-context: scanner-context
          users:
          - name: scanner-user
            user:
              token: ${TOKEN}
          EOF
          # Set proper permissions
          chmod 600 restricted-kubeconfig.yaml
          # Test the kubeconfig
          KUBECONFIG=restricted-kubeconfig.yaml kubectl get pods -n inspec-test

      - name: Run CINC Auditor scan with restricted access
        run: |
          # Resolve the CINC profile source
          if [[ "${{ github.event.inputs.cinc_profile }}" == http* ]]; then
            # If it's a URL, use it directly
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          elif [[ "${{ github.event.inputs.cinc_profile }}" == */* ]]; then
            # If it's a profile from Chef Supermarket (e.g., dev-sec/linux-baseline)
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          else
            # If it's a local path
            PROFILE="./${{ github.event.inputs.cinc_profile }}"
          fi
          # Run CINC Auditor with the train-k8s-container transport.
          # GitHub Actions runs `run:` steps with bash -e, so a non-zero exit
          # (CINC uses non-zero codes for failed controls) would abort the step
          # before the capture line — capture it with `||` instead.
          CINC_EXIT_CODE=0
          KUBECONFIG=restricted-kubeconfig.yaml cinc-auditor exec ${PROFILE} \
            -t k8s-container://inspec-test/inspec-target/busybox \
            --reporter cli json:cinc-results.json || CINC_EXIT_CODE=$?
          echo "CINC Auditor scan completed with exit code: ${CINC_EXIT_CODE}"

      - name: Process results with SAF-CLI
        run: |
          # Generate summary report with SAF-CLI
          echo "Generating scan summary with SAF-CLI:"
          saf summary --input cinc-results.json --output-md scan-summary.md
          # Display the summary in the logs
          cat scan-summary.md
          # Add to GitHub step summary
          echo "## CINC Auditor Scan Results" > $GITHUB_STEP_SUMMARY
          cat scan-summary.md >> $GITHUB_STEP_SUMMARY
          # Create a proper threshold file
          cat > threshold.yml << EOF
          compliance:
            min: ${{ github.event.inputs.threshold }}
          failed:
            critical:
              max: 0  # No critical failures allowed
          EOF
          # Apply threshold check; capture the exit code with `||` so the
          # step's bash -e doesn't abort on a failed threshold.
          echo "Checking against threshold with min compliance of ${{ github.event.inputs.threshold }}%:"
          THRESHOLD_EXIT_CODE=0
          saf threshold -i cinc-results.json -t threshold.yml || THRESHOLD_EXIT_CODE=$?
          if [ $THRESHOLD_EXIT_CODE -eq 0 ]; then
            echo "✅ Security scan passed threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
          else
            echo "❌ Security scan failed to meet threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
            # Uncomment to enforce the threshold as a quality gate
            # exit $THRESHOLD_EXIT_CODE
          fi

      - name: Upload CINC Auditor results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: cinc-results
          path: |
            cinc-results.json
            scan-summary.md

      - name: Cleanup resources
        if: always()
        run: |
          kubectl delete namespace inspec-test
Highlighting Important Sections
Below, the same workflow is shown again so that its key parts can be highlighted:
# GitHub Actions workflow: provision a minikube cluster, create a scan target,
# grant least-privilege RBAC access, and run a CINC Auditor scan against a
# single container via the train-k8s-container transport.
name: Setup Minikube and Run CINC Auditor Scan

on:
  workflow_dispatch:
    inputs:
      minikube_version:
        description: 'Minikube version to use'
        required: true
        default: 'v1.32.0'
      kubernetes_version:
        description: 'Kubernetes version to use'
        required: true
        default: 'v1.28.3'
      cinc_profile:
        description: 'CINC Auditor profile to run'
        required: true
        default: 'dev-sec/linux-baseline'
      threshold:
        description: 'Minimum passing score (0-100)'
        required: true
        default: '70'

jobs:
  setup-and-scan:
    name: Setup minikube and run CINC Auditor scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup minikube
        id: minikube
        uses: medyagh/setup-minikube@master
        with:
          minikube-version: ${{ github.event.inputs.minikube_version }}
          kubernetes-version: ${{ github.event.inputs.kubernetes_version }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          driver: docker
          start-args: --nodes=2

      - name: Get cluster status
        run: |
          kubectl get nodes
          minikube status

      - name: Set up CINC Auditor environment
        run: |
          # Install CINC Auditor
          curl -L https://omnitruck.cinc.sh/install.sh | sudo bash -s -- -P cinc-auditor
          # Install train-k8s-container plugin
          cinc-auditor plugin install train-k8s-container
          # Install SAF-CLI for result processing
          npm install -g @mitre/saf
          # Verify installation
          cinc-auditor --version
          cinc-auditor plugin list
          saf --version

      - name: Create namespace and test pod
        run: |
          # Create namespace
          kubectl create namespace inspec-test
          # Create test pod
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: inspec-target
            namespace: inspec-test
            labels:
              app: inspec-target
              scan-target: "true"
          spec:
            containers:
            - name: busybox
              image: busybox:latest
              command: ["sleep", "infinity"]
          EOF
          # Wait for pod to be running
          kubectl wait --for=condition=ready pod/inspec-target -n inspec-test --timeout=120s
          # Verify pod is running
          kubectl get pods -n inspec-test

      - name: Set up RBAC configuration
        run: |
          # Create service account
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: inspec-scanner
            namespace: inspec-test
          EOF
          # Create role scoped to the single target pod (get/list pods,
          # exec and logs only on "inspec-target")
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          metadata:
            name: inspec-container-role
            namespace: inspec-test
          rules:
          - apiGroups: [""]
            resources: ["pods"]
            verbs: ["get", "list"]
          - apiGroups: [""]
            resources: ["pods/exec"]
            verbs: ["create"]
            resourceNames: ["inspec-target"]
          - apiGroups: [""]
            resources: ["pods/log"]
            verbs: ["get"]
            resourceNames: ["inspec-target"]
          EOF
          # Create role binding
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          metadata:
            name: inspec-container-rolebinding
            namespace: inspec-test
          subjects:
          - kind: ServiceAccount
            name: inspec-scanner
            namespace: inspec-test
          roleRef:
            kind: Role
            name: inspec-container-role
            apiGroup: rbac.authorization.k8s.io
          EOF
          # Verify RBAC setup
          kubectl get serviceaccount,role,rolebinding -n inspec-test

      - name: Generate restricted kubeconfig
        run: |
          # Get a short-lived token for the scanner service account
          TOKEN=$(kubectl create token inspec-scanner -n inspec-test --duration=15m)
          # Get cluster information
          SERVER=$(kubectl config view --minify --output=jsonpath='{.clusters[0].cluster.server}')
          CA_DATA=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
          # Create kubeconfig that carries only the restricted token
          cat > restricted-kubeconfig.yaml << EOF
          apiVersion: v1
          kind: Config
          preferences: {}
          clusters:
          - cluster:
              server: ${SERVER}
              certificate-authority-data: ${CA_DATA}
            name: scanner-cluster
          contexts:
          - context:
              cluster: scanner-cluster
              namespace: inspec-test
              user: scanner-user
            name: scanner-context
          current-context: scanner-context
          users:
          - name: scanner-user
            user:
              token: ${TOKEN}
          EOF
          # Set proper permissions
          chmod 600 restricted-kubeconfig.yaml
          # Test the kubeconfig
          KUBECONFIG=restricted-kubeconfig.yaml kubectl get pods -n inspec-test

      - name: Run CINC Auditor scan with restricted access
        run: |
          # Resolve the CINC profile source
          if [[ "${{ github.event.inputs.cinc_profile }}" == http* ]]; then
            # If it's a URL, use it directly
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          elif [[ "${{ github.event.inputs.cinc_profile }}" == */* ]]; then
            # If it's a profile from Chef Supermarket (e.g., dev-sec/linux-baseline)
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          else
            # If it's a local path
            PROFILE="./${{ github.event.inputs.cinc_profile }}"
          fi
          # Run CINC Auditor with the train-k8s-container transport.
          # GitHub Actions runs `run:` steps with bash -e, so a non-zero exit
          # (CINC uses non-zero codes for failed controls) would abort the step
          # before the capture line — capture it with `||` instead.
          CINC_EXIT_CODE=0
          KUBECONFIG=restricted-kubeconfig.yaml cinc-auditor exec ${PROFILE} \
            -t k8s-container://inspec-test/inspec-target/busybox \
            --reporter cli json:cinc-results.json || CINC_EXIT_CODE=$?
          echo "CINC Auditor scan completed with exit code: ${CINC_EXIT_CODE}"

      - name: Process results with SAF-CLI
        run: |
          # Generate summary report with SAF-CLI
          echo "Generating scan summary with SAF-CLI:"
          saf summary --input cinc-results.json --output-md scan-summary.md
          # Display the summary in the logs
          cat scan-summary.md
          # Add to GitHub step summary
          echo "## CINC Auditor Scan Results" > $GITHUB_STEP_SUMMARY
          cat scan-summary.md >> $GITHUB_STEP_SUMMARY
          # Create a proper threshold file
          cat > threshold.yml << EOF
          compliance:
            min: ${{ github.event.inputs.threshold }}
          failed:
            critical:
              max: 0  # No critical failures allowed
          EOF
          # Apply threshold check; capture the exit code with `||` so the
          # step's bash -e doesn't abort on a failed threshold.
          echo "Checking against threshold with min compliance of ${{ github.event.inputs.threshold }}%:"
          THRESHOLD_EXIT_CODE=0
          saf threshold -i cinc-results.json -t threshold.yml || THRESHOLD_EXIT_CODE=$?
          if [ $THRESHOLD_EXIT_CODE -eq 0 ]; then
            echo "✅ Security scan passed threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
          else
            echo "❌ Security scan failed to meet threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
            # Uncomment to enforce the threshold as a quality gate
            # exit $THRESHOLD_EXIT_CODE
          fi

      - name: Upload CINC Auditor results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: cinc-results
          path: |
            cinc-results.json
            scan-summary.md

      - name: Cleanup resources
        if: always()
        run: |
          kubectl delete namespace inspec-test
Adding Annotations
Here's the same code with annotations explaining key components:
# GitHub Actions workflow: provision a minikube cluster, create a scan target,
# grant least-privilege RBAC access, and run a CINC Auditor scan against a
# single container via the train-k8s-container transport.
name: Setup Minikube and Run CINC Auditor Scan

on:
  workflow_dispatch:
    inputs:
      minikube_version:
        description: 'Minikube version to use'
        required: true
        default: 'v1.32.0'
      kubernetes_version:
        description: 'Kubernetes version to use'
        required: true
        default: 'v1.28.3'
      cinc_profile:
        description: 'CINC Auditor profile to run'
        required: true
        default: 'dev-sec/linux-baseline'
      threshold:
        description: 'Minimum passing score (0-100)'
        required: true
        default: '70'

jobs:
  setup-and-scan:
    name: Setup minikube and run CINC Auditor scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup minikube
        id: minikube
        uses: medyagh/setup-minikube@master
        with:
          minikube-version: ${{ github.event.inputs.minikube_version }}
          kubernetes-version: ${{ github.event.inputs.kubernetes_version }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          driver: docker
          start-args: --nodes=2

      - name: Get cluster status
        run: |
          kubectl get nodes
          minikube status

      - name: Set up CINC Auditor environment
        run: |
          # Install CINC Auditor
          curl -L https://omnitruck.cinc.sh/install.sh | sudo bash -s -- -P cinc-auditor
          # Install train-k8s-container plugin
          cinc-auditor plugin install train-k8s-container
          # Install SAF-CLI for result processing
          npm install -g @mitre/saf
          # Verify installation
          cinc-auditor --version
          cinc-auditor plugin list
          saf --version

      - name: Create namespace and test pod
        run: |
          # Create namespace
          kubectl create namespace inspec-test
          # Create test pod
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: inspec-target
            namespace: inspec-test
            labels:
              app: inspec-target
              scan-target: "true"
          spec:
            containers:
            - name: busybox
              image: busybox:latest
              command: ["sleep", "infinity"]
          EOF
          # Wait for pod to be running
          kubectl wait --for=condition=ready pod/inspec-target -n inspec-test --timeout=120s
          # Verify pod is running
          kubectl get pods -n inspec-test

      - name: Set up RBAC configuration
        run: |
          # Create service account
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: inspec-scanner
            namespace: inspec-test
          EOF
          # Create role scoped to the single target pod (get/list pods,
          # exec and logs only on "inspec-target")
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          metadata:
            name: inspec-container-role
            namespace: inspec-test
          rules:
          - apiGroups: [""]
            resources: ["pods"]
            verbs: ["get", "list"]
          - apiGroups: [""]
            resources: ["pods/exec"]
            verbs: ["create"]
            resourceNames: ["inspec-target"]
          - apiGroups: [""]
            resources: ["pods/log"]
            verbs: ["get"]
            resourceNames: ["inspec-target"]
          EOF
          # Create role binding
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          metadata:
            name: inspec-container-rolebinding
            namespace: inspec-test
          subjects:
          - kind: ServiceAccount
            name: inspec-scanner
            namespace: inspec-test
          roleRef:
            kind: Role
            name: inspec-container-role
            apiGroup: rbac.authorization.k8s.io
          EOF
          # Verify RBAC setup
          kubectl get serviceaccount,role,rolebinding -n inspec-test

      - name: Generate restricted kubeconfig
        run: |
          # Get a short-lived token for the scanner service account
          TOKEN=$(kubectl create token inspec-scanner -n inspec-test --duration=15m)
          # Get cluster information
          SERVER=$(kubectl config view --minify --output=jsonpath='{.clusters[0].cluster.server}')
          CA_DATA=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
          # Create kubeconfig that carries only the restricted token
          cat > restricted-kubeconfig.yaml << EOF
          apiVersion: v1
          kind: Config
          preferences: {}
          clusters:
          - cluster:
              server: ${SERVER}
              certificate-authority-data: ${CA_DATA}
            name: scanner-cluster
          contexts:
          - context:
              cluster: scanner-cluster
              namespace: inspec-test
              user: scanner-user
            name: scanner-context
          current-context: scanner-context
          users:
          - name: scanner-user
            user:
              token: ${TOKEN}
          EOF
          # Set proper permissions
          chmod 600 restricted-kubeconfig.yaml
          # Test the kubeconfig
          KUBECONFIG=restricted-kubeconfig.yaml kubectl get pods -n inspec-test

      - name: Run CINC Auditor scan with restricted access
        run: |
          # Resolve the CINC profile source
          if [[ "${{ github.event.inputs.cinc_profile }}" == http* ]]; then
            # If it's a URL, use it directly
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          elif [[ "${{ github.event.inputs.cinc_profile }}" == */* ]]; then
            # If it's a profile from Chef Supermarket (e.g., dev-sec/linux-baseline)
            PROFILE="${{ github.event.inputs.cinc_profile }}"
          else
            # If it's a local path
            PROFILE="./${{ github.event.inputs.cinc_profile }}"
          fi
          # Run CINC Auditor with the train-k8s-container transport.
          # GitHub Actions runs `run:` steps with bash -e, so a non-zero exit
          # (CINC uses non-zero codes for failed controls) would abort the step
          # before the capture line — capture it with `||` instead.
          CINC_EXIT_CODE=0
          KUBECONFIG=restricted-kubeconfig.yaml cinc-auditor exec ${PROFILE} \
            -t k8s-container://inspec-test/inspec-target/busybox \
            --reporter cli json:cinc-results.json || CINC_EXIT_CODE=$?
          echo "CINC Auditor scan completed with exit code: ${CINC_EXIT_CODE}"

      - name: Process results with SAF-CLI
        run: |
          # Generate summary report with SAF-CLI
          echo "Generating scan summary with SAF-CLI:"
          saf summary --input cinc-results.json --output-md scan-summary.md
          # Display the summary in the logs
          cat scan-summary.md
          # Add to GitHub step summary
          echo "## CINC Auditor Scan Results" > $GITHUB_STEP_SUMMARY
          cat scan-summary.md >> $GITHUB_STEP_SUMMARY
          # Create a proper threshold file
          cat > threshold.yml << EOF
          compliance:
            min: ${{ github.event.inputs.threshold }}
          failed:
            critical:
              max: 0  # No critical failures allowed
          EOF
          # Apply threshold check; capture the exit code with `||` so the
          # step's bash -e doesn't abort on a failed threshold.
          echo "Checking against threshold with min compliance of ${{ github.event.inputs.threshold }}%:"
          THRESHOLD_EXIT_CODE=0
          saf threshold -i cinc-results.json -t threshold.yml || THRESHOLD_EXIT_CODE=$?
          if [ $THRESHOLD_EXIT_CODE -eq 0 ]; then
            echo "✅ Security scan passed threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
          else
            echo "❌ Security scan failed to meet threshold requirements" | tee -a $GITHUB_STEP_SUMMARY
            # Uncomment to enforce the threshold as a quality gate
            # exit $THRESHOLD_EXIT_CODE
          fi

      - name: Upload CINC Auditor results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: cinc-results
          path: |
            cinc-results.json
            scan-summary.md

      - name: Cleanup resources
        if: always()
        run: |
          kubectl delete namespace inspec-test
This is the workflow name that appears in the GitHub Actions tab
The workflow is triggered manually via workflow_dispatch, not by a push — the inputs supply the versions, profile, and threshold
The inputs section defines the parameters referenced throughout the workflow via github.event.inputs
The setup-and-scan job provisions the minikube Kubernetes environment used for scanning
The minikube setup step uses the medyagh/setup-minikube community action
All steps run sequentially within the single job; the upload and cleanup steps use if: always() so they run even when the scan fails
GitLab CI Integration
Let's compare with a GitLab CI configuration:
Two variants are shown below: a basic GitLab CI pipeline, followed by a version that uses service containers.
# GitLab CI pipeline: deploy a target pod, create least-privilege scanner
# access, run a CINC Auditor scan, report, and clean up.
stages:
  - deploy
  - scan
  - report
  - cleanup

variables:
  SCANNER_NAMESPACE: "inspec-test"
  TARGET_LABEL: "app=target-app"
  THRESHOLD_VALUE: "70"  # Minimum passing score (0-100)

deploy_container:
  stage: deploy
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: Pod
      metadata:
        name: scan-target-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
        labels:
          app: target-app
          pipeline: "${CI_PIPELINE_ID}"
      spec:
        containers:
        - name: target
          image: registry.example.com/my-image:latest
          command: ["sleep", "1h"]
      EOF
    - |
      # Wait for pod to be ready
      kubectl wait --for=condition=ready pod/scan-target-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --timeout=120s
    - |
      # Save target info for later stages (propagated via dotenv artifact)
      echo "TARGET_POD=scan-target-${CI_PIPELINE_ID}" >> deploy.env
      echo "TARGET_CONTAINER=target" >> deploy.env
  artifacts:
    reports:
      dotenv: deploy.env

create_access:
  stage: scan
  needs: [deploy_container]
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      # Create the role scoped to this specific pod only
      cat <<EOF | kubectl apply -f -
      apiVersion: rbac.authorization.k8s.io/v1
      kind: Role
      metadata:
        name: scanner-role-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      rules:
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["get", "list"]
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["create"]
        resourceNames: ["${TARGET_POD}"]
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get"]
        resourceNames: ["${TARGET_POD}"]
      EOF
    - |
      # Create service account
      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: scanner-sa-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      EOF
    - |
      # Create role binding
      cat <<EOF | kubectl apply -f -
      apiVersion: rbac.authorization.k8s.io/v1
      kind: RoleBinding
      metadata:
        name: scanner-binding-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      subjects:
      - kind: ServiceAccount
        name: scanner-sa-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      roleRef:
        kind: Role
        name: scanner-role-${CI_PIPELINE_ID}
        apiGroup: rbac.authorization.k8s.io
      EOF
    - |
      # Generate a short-lived token
      TOKEN=$(kubectl create token scanner-sa-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --duration=30m)
      echo "SCANNER_TOKEN=${TOKEN}" >> scanner.env
      # Save cluster info
      SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
      CA_DATA=$(kubectl config view --raw --minify --flatten \
        -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
      echo "CLUSTER_SERVER=${SERVER}" >> scanner.env
      echo "CLUSTER_CA_DATA=${CA_DATA}" >> scanner.env
  artifacts:
    reports:
      dotenv: scanner.env

run_scan:
  stage: scan
  needs: [deploy_container, create_access]
  script:
    - |
      # Create a kubeconfig that carries only the restricted token
      cat > scan-kubeconfig.yaml << EOF
      apiVersion: v1
      kind: Config
      preferences: {}
      clusters:
      - cluster:
          server: ${CLUSTER_SERVER}
          certificate-authority-data: ${CLUSTER_CA_DATA}
        name: scanner-cluster
      contexts:
      - context:
          cluster: scanner-cluster
          namespace: ${SCANNER_NAMESPACE}
          user: scanner-user
        name: scanner-context
      current-context: scanner-context
      users:
      - name: scanner-user
        user:
          token: ${SCANNER_TOKEN}
      EOF
    - |
      # Install CINC Auditor
      curl -L https://omnitruck.cinc.sh/install.sh | sudo bash -s -- -P cinc-auditor
      # Install train-k8s-container plugin
      cinc-auditor plugin install train-k8s-container
      # Install SAF CLI
      npm install -g @mitre/saf
      # Run cinc-auditor scan; capture the exit code with `||` so a failing
      # scan doesn't abort the script before the threshold check runs
      KUBECONFIG=scan-kubeconfig.yaml \
      cinc-auditor exec ${CINC_PROFILE_PATH} \
        -t k8s-container://${SCANNER_NAMESPACE}/${TARGET_POD}/${TARGET_CONTAINER} \
        --reporter json:scan-results.json || true
      # Generate scan summary using SAF CLI
      saf summary --input scan-results.json --output-md scan-summary.md
      # Display summary in job output
      cat scan-summary.md
      # Check scan against threshold.
      # NOTE(review): the GitHub workflow above passes a threshold *file* to -t;
      # confirm saf accepts a bare percentage here, or generate a threshold.yml.
      THRESHOLD_RESULT=0
      saf threshold -i scan-results.json -t ${THRESHOLD_VALUE} || THRESHOLD_RESULT=$?
      # Save result for later stages
      echo "THRESHOLD_PASSED=${THRESHOLD_RESULT}" >> scan.env
      if [ ${THRESHOLD_RESULT} -eq 0 ]; then
        echo "✅ Security scan passed threshold requirements"
      else
        echo "❌ Security scan failed to meet threshold requirements"
        # Uncomment to enforce threshold as a gate
        # exit ${THRESHOLD_RESULT}
      fi
  artifacts:
    paths:
      - scan-results.json
      - scan-summary.md
    reports:
      dotenv: scan.env

generate_report:
  stage: report
  needs: [run_scan]
  script:
    - |
      # Install SAF CLI if needed in this stage
      which saf || npm install -g @mitre/saf
      # Generate a more comprehensive report
      saf view -i scan-results.json --output scan-report.html
      # Create a simple markdown report for the MR.
      # The threshold line is evaluated via command substitution so the
      # report contains the PASSED/FAILED marker, not literal shell text.
      cat > scan-report.md << EOF
      # Security Scan Results
      ## Summary
      $(cat scan-summary.md)
      ## Threshold Check
      $([ "${THRESHOLD_PASSED}" -eq 0 ] && echo "✅ **PASSED**" || echo "❌ **FAILED**")
      Threshold: ${THRESHOLD_VALUE}%
      ## Details
      For full results, see the artifacts.
      EOF
  artifacts:
    paths:
      - scan-report.html
      - scan-report.md
    when: always

cleanup:
  stage: cleanup
  needs: [run_scan]
  when: always  # Run even if previous stages failed
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      # Delete all resources
      kubectl delete pod/${TARGET_POD} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete role/scanner-role-${CI_PIPELINE_ID} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete sa/scanner-sa-${CI_PIPELINE_ID} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete rolebinding/scanner-binding-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --ignore-not-found
# GitLab CI pipeline variant that runs CINC Auditor inside pre-built service
# containers (including a specialized one for distroless targets) instead of
# installing the tooling in the job.
stages:
  - deploy
  - scan
  - report
  - cleanup

variables:
  SCANNER_NAMESPACE: "inspec-test"
  TARGET_LABEL: "app=target-app"
  THRESHOLD_VALUE: "70"  # Minimum passing score (0-100)

# Define a custom service image for CINC Auditor
services:
  - name: registry.example.com/cinc-auditor-scanner:latest
    alias: cinc-scanner
    entrypoint: ["sleep", "infinity"]

deploy_container:
  stage: deploy
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: Pod
      metadata:
        name: scan-target-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
        labels:
          app: target-app
          pipeline: "${CI_PIPELINE_ID}"
      spec:
        containers:
        - name: target
          image: registry.example.com/my-image:latest
          command: ["sleep", "1h"]
      EOF
    - |
      # Wait for pod to be ready
      kubectl wait --for=condition=ready pod/scan-target-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --timeout=120s
    - |
      # Save target info for later stages (propagated via dotenv artifact)
      echo "TARGET_POD=scan-target-${CI_PIPELINE_ID}" >> deploy.env
      echo "TARGET_CONTAINER=target" >> deploy.env
  artifacts:
    reports:
      dotenv: deploy.env

create_access:
  stage: scan
  needs: [deploy_container]
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      # Create the role scoped to this specific pod only
      cat <<EOF | kubectl apply -f -
      apiVersion: rbac.authorization.k8s.io/v1
      kind: Role
      metadata:
        name: scanner-role-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      rules:
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["get", "list"]
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["create"]
        resourceNames: ["${TARGET_POD}"]
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get"]
        resourceNames: ["${TARGET_POD}"]
      EOF
    - |
      # Create service account
      cat <<EOF | kubectl apply -f -
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: scanner-sa-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      EOF
    - |
      # Create role binding
      cat <<EOF | kubectl apply -f -
      apiVersion: rbac.authorization.k8s.io/v1
      kind: RoleBinding
      metadata:
        name: scanner-binding-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      subjects:
      - kind: ServiceAccount
        name: scanner-sa-${CI_PIPELINE_ID}
        namespace: ${SCANNER_NAMESPACE}
      roleRef:
        kind: Role
        name: scanner-role-${CI_PIPELINE_ID}
        apiGroup: rbac.authorization.k8s.io
      EOF
    - |
      # Generate a short-lived token
      TOKEN=$(kubectl create token scanner-sa-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --duration=30m)
      echo "SCANNER_TOKEN=${TOKEN}" >> scanner.env
      # Save cluster info
      SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
      CA_DATA=$(kubectl config view --raw --minify --flatten \
        -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
      echo "CLUSTER_SERVER=${SERVER}" >> scanner.env
      echo "CLUSTER_CA_DATA=${CA_DATA}" >> scanner.env
  artifacts:
    reports:
      dotenv: scanner.env

run_scan:
  stage: scan
  needs: [deploy_container, create_access]
  # This job uses the cinc-scanner service container.
  # The service container already has CINC Auditor and the SAF CLI installed.
  script:
    - |
      # Create a kubeconfig that carries only the restricted token
      cat > scan-kubeconfig.yaml << EOF
      apiVersion: v1
      kind: Config
      preferences: {}
      clusters:
      - cluster:
          server: ${CLUSTER_SERVER}
          certificate-authority-data: ${CLUSTER_CA_DATA}
        name: scanner-cluster
      contexts:
      - context:
          cluster: scanner-cluster
          namespace: ${SCANNER_NAMESPACE}
          user: scanner-user
        name: scanner-context
      current-context: scanner-context
      users:
      - name: scanner-user
        user:
          token: ${SCANNER_TOKEN}
      EOF
    - |
      # Copy kubeconfig and profiles to service container
      docker cp scan-kubeconfig.yaml cinc-scanner:/tmp/
      docker cp ${CINC_PROFILE_PATH} cinc-scanner:/tmp/profile
      # Run scan in service container; the exit code of the threshold check
      # is written to a file so it survives the docker exec boundary
      docker exec cinc-scanner bash -c "
        KUBECONFIG=/tmp/scan-kubeconfig.yaml \
        cinc-auditor exec /tmp/profile \
          -t k8s-container://${SCANNER_NAMESPACE}/${TARGET_POD}/${TARGET_CONTAINER} \
          --reporter json:/tmp/scan-results.json
        # Generate scan summary using SAF CLI
        saf summary --input /tmp/scan-results.json --output-md /tmp/scan-summary.md
        # Check scan against threshold
        saf threshold -i /tmp/scan-results.json -t ${THRESHOLD_VALUE}
        echo \$? > /tmp/threshold_result.txt
      "
      # Copy results back from service container
      docker cp cinc-scanner:/tmp/scan-results.json ./scan-results.json
      docker cp cinc-scanner:/tmp/scan-summary.md ./scan-summary.md
      docker cp cinc-scanner:/tmp/threshold_result.txt ./threshold_result.txt
      # Display summary in job output
      cat scan-summary.md
      # Process threshold result
      THRESHOLD_RESULT=$(cat threshold_result.txt)
      echo "THRESHOLD_PASSED=${THRESHOLD_RESULT}" >> scan.env
      if [ ${THRESHOLD_RESULT} -eq 0 ]; then
        echo "✅ Security scan passed threshold requirements"
      else
        echo "❌ Security scan failed to meet threshold requirements"
        # Uncomment to enforce threshold as a gate
        # exit ${THRESHOLD_RESULT}
      fi
  artifacts:
    paths:
      - scan-results.json
      - scan-summary.md
    reports:
      dotenv: scan.env

# For distroless containers, we need a specialized approach
run_distroless_scan:
  stage: scan
  needs: [deploy_container, create_access]
  # This job will only run if the DISTROLESS variable is set to "true"
  rules:
    - if: $DISTROLESS == "true"
  # Use our specialized distroless scanner service container
  services:
    - name: registry.example.com/distroless-scanner:latest
      alias: distroless-scanner
      entrypoint: ["sleep", "infinity"]
  script:
    - |
      # Create a kubeconfig that carries only the restricted token
      cat > scan-kubeconfig.yaml << EOF
      apiVersion: v1
      kind: Config
      preferences: {}
      clusters:
      - cluster:
          server: ${CLUSTER_SERVER}
          certificate-authority-data: ${CLUSTER_CA_DATA}
        name: scanner-cluster
      contexts:
      - context:
          cluster: scanner-cluster
          namespace: ${SCANNER_NAMESPACE}
          user: scanner-user
        name: scanner-context
      current-context: scanner-context
      users:
      - name: scanner-user
        user:
          token: ${SCANNER_TOKEN}
      EOF
    - |
      # Copy kubeconfig and profiles to distroless scanner service container
      docker cp scan-kubeconfig.yaml distroless-scanner:/tmp/
      docker cp ${CINC_PROFILE_PATH} distroless-scanner:/tmp/profile
      # Run specialized distroless scan in service container
      docker exec distroless-scanner bash -c "
        KUBECONFIG=/tmp/scan-kubeconfig.yaml \
        /opt/scripts/scan-distroless.sh \
          ${SCANNER_NAMESPACE} ${TARGET_POD} ${TARGET_CONTAINER} \
          /tmp/profile /tmp/scan-results.json
        # Generate scan summary using SAF CLI
        saf summary --input /tmp/scan-results.json --output-md /tmp/scan-summary.md
        # Check scan against threshold
        saf threshold -i /tmp/scan-results.json -t ${THRESHOLD_VALUE}
        echo \$? > /tmp/threshold_result.txt
      "
      # Copy results back from service container
      docker cp distroless-scanner:/tmp/scan-results.json ./scan-results.json
      docker cp distroless-scanner:/tmp/scan-summary.md ./scan-summary.md
      docker cp distroless-scanner:/tmp/threshold_result.txt ./threshold_result.txt
      # Display summary in job output
      cat scan-summary.md
      # Process threshold result
      THRESHOLD_RESULT=$(cat threshold_result.txt)
      echo "THRESHOLD_PASSED=${THRESHOLD_RESULT}" >> scan.env
      if [ ${THRESHOLD_RESULT} -eq 0 ]; then
        echo "✅ Security scan passed threshold requirements"
      else
        echo "❌ Security scan failed to meet threshold requirements"
        # Uncomment to enforce threshold as a gate
        # exit ${THRESHOLD_RESULT}
      fi
  artifacts:
    paths:
      - scan-results.json
      - scan-summary.md
    reports:
      dotenv: scan.env

generate_report:
  stage: report
  needs: [run_scan]
  script:
    - |
      # Install SAF CLI if needed in this stage
      which saf || npm install -g @mitre/saf
      # Generate a more comprehensive report
      saf view -i scan-results.json --output scan-report.html
      # Create a simple markdown report for the MR.
      # The threshold line is evaluated via command substitution so the
      # report contains the PASSED/FAILED marker, not literal shell text.
      cat > scan-report.md << EOF
      # Security Scan Results
      ## Summary
      $(cat scan-summary.md)
      ## Threshold Check
      $([ "${THRESHOLD_PASSED}" -eq 0 ] && echo "✅ **PASSED**" || echo "❌ **FAILED**")
      Threshold: ${THRESHOLD_VALUE}%
      ## Details
      For full results, see the artifacts.
      EOF
  artifacts:
    paths:
      - scan-report.html
      - scan-report.md
    when: always

cleanup:
  stage: cleanup
  needs: [run_scan]
  when: always  # Run even if previous stages failed
  script:
    - echo "$KUBE_CONFIG" | base64 -d > kubeconfig.yaml
    - export KUBECONFIG=kubeconfig.yaml
    - |
      # Delete all resources
      kubectl delete pod/${TARGET_POD} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete role/scanner-role-${CI_PIPELINE_ID} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete sa/scanner-sa-${CI_PIPELINE_ID} -n ${SCANNER_NAMESPACE} --ignore-not-found
      kubectl delete rolebinding/scanner-binding-${CI_PIPELINE_ID} \
        -n ${SCANNER_NAMESPACE} --ignore-not-found
Sidecar Scanner Configuration
This is a configuration for the sidecar container approach:
# Manually-triggered workflow: scan a target container using a CINC Auditor
# sidecar that shares the target pod's process namespace.
name: CINC Auditor Sidecar Container Scan

on:
  workflow_dispatch:
    inputs:
      kubernetes_version:
        description: 'Kubernetes version to use'
        required: true
        default: 'v1.28.3'
      target_image:
        description: 'Target container image to scan'
        required: true
        default: 'busybox:latest'
      is_distroless:
        description: 'Is the target a distroless container?'
        required: true
        type: boolean
        # A boolean-typed input takes a boolean default, not the string 'false'
        default: false
      threshold:
        description: 'Minimum passing score (0-100)'
        required: true
        default: '70'
jobs:
  sidecar-scan:
    name: Sidecar Container Scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # Create a single-node kind cluster at the requested Kubernetes version.
      - name: Setup Kubernetes
        id: kind
        uses: helm/kind-action@v1.8.0
        with:
          version: v0.20.0
          cluster_name: scan-cluster
          config: |
            kind: Cluster
            apiVersion: kind.x-k8s.io/v1alpha4
            nodes:
              - role: control-plane
                kubeadmConfigPatches:
                  - |
                    kind: InitConfiguration
                    nodeRegistration:
                      kubeletExtraArgs:
                        # NOTE(review): the original also passed
                        # feature-gates: "EphemeralContainers=true"; that gate
                        # went GA in k8s v1.25 and was later removed, so a
                        # v1.28 kubelet refuses to start with it. Dropped.
                        "system-reserved": "cpu=500m,memory=500Mi"
                image: kindest/node:${{ github.event.inputs.kubernetes_version }}

      - name: Get cluster status
        run: |
          kubectl get nodes
          kubectl cluster-info
- name : Build CINC Auditor Scanner container
run : |
# Create a Dockerfile for the CINC Auditor scanner container
cat > Dockerfile.scanner << EOF
FROM ruby:3.0-slim
# Install dependencies
RUN apt-get update && apt-get install -y \
curl \
gnupg \
procps \
nodejs \
npm \
&& rm -rf /var/lib/apt/lists/*
# Install CINC Auditor
RUN curl -L https://omnitruck.cinc.sh/install.sh | bash -s -- -P cinc-auditor
# Install SAF CLI
RUN npm install -g @mitre/saf
# Copy profiles
COPY examples/cinc-profiles/container-baseline /opt/profiles/container-baseline
# Verify installation
RUN cinc-auditor --version && \
saf --version
# Create a simple script to scan in sidecar mode
RUN echo '#!/bin/bash \n\
TARGET_PID=\$(ps aux | grep -v grep | grep "\$1" | head -1 | awk "{print \\\$2}") \n\
echo "Target process identified: PID \$TARGET_PID" \n\
\n\
cinc-auditor exec /opt/profiles/\$2 \\\n\
-b os=linux \\\n\
--target=/proc/\$TARGET_PID/root \\\n\
--reporter cli json:/results/scan-results.json \n\
\n\
saf summary --input /results/scan-results.json --output-md /results/scan-summary.md \n\
\n\
saf threshold -i /results/scan-results.json -t /opt/thresholds/threshold.yml \n\
echo \$? > /results/threshold-result.txt \n\
\n\
touch /results/scan-complete \n\
' > /usr/local/bin/run-scanner
RUN chmod +x /usr/local/bin/run-scanner
# Default command
CMD ["/bin/bash"]
EOF
# Build the scanner image
docker build -t cinc-scanner:latest -f Dockerfile.scanner .
# Load the image into kind
kind load docker-image cinc-scanner:latest --name scan-cluster
- name : Create namespace and prepare environment
run : |
# Create namespace
kubectl create namespace inspec-test
# Create threshold ConfigMap
cat > threshold.yml << EOF
compliance:
min: ${{ github.event.inputs.threshold }}
failed:
critical:
max: 0 # No critical failures allowed
EOF
kubectl create configmap inspec-thresholds \
--from-file=threshold.yml=threshold.yml \
-n inspec-test
- name : Deploy pod with scanner sidecar
run : |
# Create the pod with shared process namespace
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: app-scanner
namespace: inspec-test
labels:
app: scanner-pod
spec:
shareProcessNamespace: true # Enable shared process namespace
containers:
# Target container to be scanned
- name: target
image: ${{ github.event.inputs.target_image }}
command: ["sleep", "3600"]
# CINC Auditor scanner sidecar
- name: scanner
image: cinc-scanner:latest
command:
- "/bin/bash"
- "-c"
- |
# Wait for the main container to start
sleep 10
echo "Starting CINC Auditor scan..."
# Use the script to find process and run scanner
run-scanner "sleep 3600" "container-baseline"
# Keep container running briefly to allow result retrieval
echo "Scan complete. Results available in /results directory."
sleep 300
volumeMounts:
- name: shared-results
mountPath: /results
- name: thresholds
mountPath: /opt/thresholds
volumes:
- name: shared-results
emptyDir: {}
- name: thresholds
configMap:
name: inspec-thresholds
EOF
# Wait for pod to be ready
kubectl wait --for=condition=ready pod/app-scanner -n inspec-test --timeout=300s
# Verify the pod is ready
kubectl get pod app-scanner -n inspec-test
- name : Wait for scan to complete and retrieve results
run : |
# Wait for scan to complete
echo "Waiting for scan to complete..."
until kubectl exec -it app-scanner -n inspec-test -c scanner -- ls /results/scan-complete >/dev/null 2>&1; do
echo "Scan in progress..."
sleep 5
done
# Retrieve scan results
echo "Retrieving scan results..."
kubectl cp inspec-test/app-scanner:/results/scan-results.json ./scan-results.json -c scanner
kubectl cp inspec-test/app-scanner:/results/scan-summary.md ./scan-summary.md -c scanner
# Check threshold result
if kubectl exec -it app-scanner -n inspec-test -c scanner -- cat /results/threshold-result.txt >/dev/null 2>&1; then
THRESHOLD_RESULT=$(kubectl exec -it app-scanner -n inspec-test -c scanner -- cat /results/threshold-result.txt)
echo "THRESHOLD_RESULT=${THRESHOLD_RESULT}" >> $GITHUB_ENV
if [ "${THRESHOLD_RESULT}" -eq 0 ]; then
echo "✅ Security scan passed threshold requirements"
else
echo "❌ Security scan failed to meet threshold requirements"
fi
else
echo "Warning: Threshold result not found"
echo "THRESHOLD_RESULT=1" >> $GITHUB_ENV
fi
# Display summary in job output
echo "============= SCAN SUMMARY ============="
cat scan-summary.md
echo "========================================"
- name : Process results with SAF-CLI
run : |
# Install SAF CLI
npm install -g @mitre/saf
# Generate reports
saf view -i scan-results.json --output scan-report.html
saf generate -i scan-results.json -o csv > results.csv
saf generate -i scan-results.json -o junit > junit-results.xml
# Add to GitHub step summary
echo "## CINC Auditor Scan Results" > $GITHUB_STEP_SUMMARY
cat scan-summary.md >> $GITHUB_STEP_SUMMARY
# Add threshold result to summary
if [ "${{ env.THRESHOLD_RESULT }}" -eq 0 ]; then
echo "## ✅ Security scan passed threshold requirements" >> $GITHUB_STEP_SUMMARY
else
echo "## ❌ Security scan failed to meet threshold requirements" >> $GITHUB_STEP_SUMMARY
fi
echo "Threshold: ${{ github.event.inputs.threshold }}%" >> $GITHUB_STEP_SUMMARY
- name : Upload CINC Auditor results
if : always()
uses : actions/upload-artifact@v4
with :
name : cinc-results
path : |
scan-results.json
scan-summary.md
scan-report.html
results.csv
junit-results.xml
- name : Cleanup resources
if : always()
run : |
kubectl delete namespace inspec-test
Key Configuration Elements
Let's break down the important configuration elements:
Service Account : The scanner requires appropriate RBAC permissions
Container Configuration : The scanner is deployed alongside the application container
Volume Mounts : Configuration is provided through ConfigMaps
Environment Variables : Control the scanner's behavior
Comparing Different Approaches
Here's a comparison of different scanning approaches:
Conclusion
The code display capabilities demonstrated on this page help make our documentation more:
Clear : Code is syntax highlighted and properly formatted
Interactive : Copy buttons and line highlighting improve usability
Annotated : Comments help explain complex configurations
Consistent : Using the same example files throughout documentation