Skip to content

Instantly share code, notes, and snippets.

@unclebean
Last active October 1, 2025 02:59
Show Gist options
  • Select an option

  • Save unclebean/c9769cce87fc0ce5444bf2fdeffaf0da to your computer and use it in GitHub Desktop.

Select an option

Save unclebean/c9769cce87fc0ce5444bf2fdeffaf0da to your computer and use it in GitHub Desktop.
Karate gatling
// Gradle dependency: Jackson Afterburner module (bytecode-generated (de)serializers,
// avoids reflection overhead in databind) — pairs with the JacksonConfig bean below.
implementation("com.fasterxml.jackson.module:jackson-module-afterburner")
/**
 * Registers Jackson's Afterburner module on Spring Boot's auto-configured
 * ObjectMapper by contributing a builder customizer bean.
 */
@Configuration
public class JacksonConfig {

    /** Installs {@code AfterburnerModule} on the shared ObjectMapper builder. */
    @Bean
    public Jackson2ObjectMapperBuilderCustomizer afterburnerModule() {
        return builder -> {
            // Afterburner speeds up (de)serialization via generated bytecode.
            builder.modulesToInstall(new AfterburnerModule());
        };
    }
}
# Start a 60s Java Flight Recorder capture at JVM launch; output written to myrecording.jfr
java -XX:StartFlightRecording=duration=60s,filename=myrecording.jfr -jar myapp.jar
# Attach JFR to an already-running JVM (pid 12345) for 120s using the "profile" preset
sudo jcmd 12345 JFR.start name=loadtest duration=120s filename=/tmp/loadtest.jfr settings=profile
# GitLab MR AI-review helper: fetch the MR diff, ask Azure OpenAI for a review,
# post the result back to the MR. Intended to run inside a GitLab CI job.
# NOTE(review): textwrap is imported but never used below.
import os, requests, textwrap
# GitLab CI context — missing mandatory vars fail fast with KeyError
project_id = os.environ["CI_PROJECT_ID"]
mr_iid = os.environ["CI_MERGE_REQUEST_IID"]
api_base = os.environ.get("CI_API_V4_URL", "https://gitlab.com/api/v4")
gl_token = os.environ["GITLAB_TOKEN"]
# Azure OpenAI settings
aoai_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"].rstrip("/")
aoai_deployment = os.environ["AZURE_OPENAI_DEPLOYMENT"]
aoai_version = os.environ.get("AZURE_OPENAI_API_VERSION", "2024-02-15-preview")
aoai_url = f"{aoai_endpoint}/openai/deployments/{aoai_deployment}/chat/completions?api-version={aoai_version}"
aoai_headers = {
"api-key": os.environ["AZURE_OPENAI_API_KEY"],
"Content-Type": "application/json"
}
# 1) Pull diff from the MR
# GET /projects/:id/merge_requests/:iid/changes returns per-file unified diffs
r = requests.get(
f"{api_base}/projects/{project_id}/merge_requests/{mr_iid}/changes",
headers={"PRIVATE-TOKEN": gl_token},
timeout=60
)
r.raise_for_status()
# each entry carries old_path/new_path and a "diff" string (may be empty)
changes = r.json().get("changes", [])
# 2) Build a compact prompt (clip to keep tokens sane)
def clip(s, n=12000):
    """Return s unchanged when it fits in n characters; otherwise return the
    first n characters followed by a truncation marker.

    (The original's body lost its indentation in extraction; restored here.)
    """
    if len(s) <= n:
        return s
    return s[:n] + "\n...[truncated]..."
# Chat messages: a system prompt fixing the reviewer persona, then one user
# message per changed file with its (clipped) diff.
# (Loop-body indentation was lost in extraction; restored here.)
prompt = [
    {"role":"system","content":(
        "You are a senior code reviewer. Be surgical and concise. "
        "Focus on bugs, security issues (secrets, injection, unsafe deserialization, SSRF, path traversal), "
        "race conditions, performance hotspots, test coverage gaps, and style consistency. "
        "When possible, cite file paths and approximate line ranges from the diff."
    )},
    {"role":"user","content":"Review the following diff chunks file by file. Only comment on meaningful issues."}
]
for ch in changes:
    diff = ch.get("diff") or ""
    if not diff:
        # renames/deletes can have an empty diff — nothing to review
        continue
    prompt.append({"role":"user","content": f"File: {ch.get('new_path', ch.get('old_path','?'))}\n{clip(diff)}"})
# 3) Call Azure OpenAI
# Chat-completions call; low temperature for deterministic review tone
resp = requests.post(aoai_url, headers=aoai_headers, json={
"messages": prompt,
"temperature": 0.2,
"max_tokens": 1200
}, timeout=120)
resp.raise_for_status()
review = resp.json()["choices"][0]["message"]["content"].strip()
# 4) Post the review back to the MR as a single note
# form-encoded body is accepted by the GitLab notes API
requests.post(
f"{api_base}/projects/{project_id}/merge_requests/{mr_iid}/notes",
headers={"PRIVATE-TOKEN": gl_token},
data={"body": f"## AI Review (Azure OpenAI)\n{review}"},
timeout=30
).raise_for_status()
# Playbook: enumerate users/groups on RHEL 8 via the getent module.
# (Original lost its YAML indentation in extraction; restored here. Also fixes:
#  - debug `var:` used with filter expressions -> use msg with Jinja,
#  - uid compared as string vs int 1000 in the human-users filter,
#  - group members live at value index 2 (getent_group values are
#    [password, gid, members]), not index 3,
#  - recursive `item: "{{ item }}"` var on the membership task.)
- name: List users and groups on RHEL 8
  hosts: all
  gather_facts: false
  tasks:
    - name: Gather passwd DB
      ansible.builtin.getent:
        database: passwd

    - name: Gather group DB
      ansible.builtin.getent:
        database: group

    # All usernames
    - name: Show all users (usernames)
      ansible.builtin.debug:
        msg: "{{ getent_passwd.keys() | list | sort }}"

    # All group names
    - name: Show all groups (group names)
      ansible.builtin.debug:
        msg: "{{ getent_group.keys() | list | sort }}"

    # Human users only (uid >= 1000, excluding nobody 65534).
    # getent field values are strings, so cast uid to int before comparing.
    - name: Show human users
      ansible.builtin.debug:
        msg: >-
          {%- set users = [] -%}
          {%- for name, vals in getent_passwd.items() -%}
            {%- set uid = vals[1] | default(-1) | int(-1) -%}
            {%- if uid >= 1000 and uid != 65534 -%}
              {%- set _ = users.append(name) -%}
            {%- endif -%}
          {%- endfor -%}
          {{ users | sort }}

    # Group -> members map; members is a comma-separated string, possibly empty.
    - name: Show group membership
      ansible.builtin.debug:
        msg: "{{ item.key }}: {{ (item.value.2 | default('')).split(',') | reject('equalto', '') | list }}"
      loop: "{{ getent_group | dict2items }}"
# Playbook: resolve the latest RPM URL from the dnf repo, download it, then
# reinstall the package without running its scriptlets.
# (Original lost its YAML indentation in extraction; restored here.)
- hosts: all
  become: true
  vars:
    pkg_name: myapp
    download_dir: /tmp
  tasks:
    - name: Ensure dnf repo tools
      ansible.builtin.package:
        name: dnf-plugins-core
        state: present
      when: ansible_pkg_mgr == 'dnf'

    - name: Query repo for latest RPM URL
      ansible.builtin.command: dnf repoquery --latest-limit=1 --location {{ pkg_name }}
      register: rpm_url
      changed_when: false
      when: ansible_pkg_mgr == 'dnf'

    - name: Download RPM from URL
      ansible.builtin.get_url:
        url: "{{ rpm_url.stdout }}"
        dest: "{{ download_dir }}/"
      register: dl
      when: ansible_pkg_mgr == 'dnf'

    # get_url reports the final file path in its `dest` return value
    - name: Record downloaded RPM path
      ansible.builtin.set_fact:
        rpm_file: "{{ dl.dest }}"
      when: ansible_pkg_mgr == 'dnf'

    # Same uninstall/install steps as above, using {{ rpm_file }}
    - name: Check whether the package is installed
      ansible.builtin.command: rpm -q {{ pkg_name }}
      register: q
      failed_when: false
      changed_when: false

    - name: Erase without scripts
      ansible.builtin.command: rpm -evh --noscripts {{ pkg_name }}
      when: q.rc == 0

    - name: Install without scripts from file
      ansible.builtin.command: rpm -ivh --noscripts "{{ rpm_file }}"
# Build a PKCS#12 keystore from separate cert/key/CA PEM files.
# NOTE(review): "-passout pass:" sets an EMPTY export password — fine for a
# throwaway test, but supply a real password for anything shared.
openssl pkcs12 -export \
-in cert.pem \
-inkey key.pem \
-certfile ca.pem \
-out keystore.p12 \
-name springboot-cert \
-passout pass:
# Same, but extracting the certificate and key sections from one combined PEM
# via process substitution (requires bash/zsh; not POSIX sh).
openssl pkcs12 -export \
-in <(awk '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/' full.pem) \
-inkey <(awk '/-----BEGIN .*PRIVATE KEY-----/,/-----END .*PRIVATE KEY-----/' full.pem) \
-certfile ca.pem \
-out keystore.p12 \
-name springboot-cert \
-passout pass:
# Read the User= and Group= directives from the effective unit definition
# (systemctl cat includes drop-ins, so multiple matches are possible).
USER=$(systemctl cat myapp.service | awk -F= '/^User=/ {print $2}')
GROUP=$(systemctl cat myapp.service | awk -F= '/^Group=/ {print $2}')
echo "User: $USER"
echo "Group: $GROUP"
# --- oneshot unit that guarantees the gitlab-runner user exists before the
# --- runner service starts (ordered via Before=) ---
sudo nano /etc/systemd/system/create-gitlab-runner.service
[Unit]
Description=Ensure gitlab-runner user exists
Before=gitlab-runner.service
[Service]
Type=oneshot
ExecStart=/usr/local/bin/create-gitlab-user.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
# --- helper script: create the system user only if it is missing ---
sudo nano /usr/local/bin/create-gitlab-user.sh
#!/bin/bash
id gitlab-runner &>/dev/null || useradd --system --create-home --shell /sbin/nologin gitlab-runner
# make the helper executable and enable the unit at boot
sudo chmod +x /usr/local/bin/create-gitlab-user.sh
sudo systemctl enable create-gitlab-runner.service
# Put this once at the top of .gitlab-ci.yml
# (Original lost its YAML indentation in extraction; restored here.)
workflow:
  rules:
    # Prefer MR pipelines
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    # If there is an open MR, skip the extra push pipeline
    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_OPEN_MERGE_REQUESTS'
      when: never
    # Otherwise allow everything (normal pushes without MR, schedules, etc.)
    - when: always

# Base config — common settings for all jobs (hidden job, consumed via extends)
.base_config:
  stage: build
  image: alpine:latest
  script:
    - echo "do something common"

# This job runs only on develop and feature branches, not on release/*
normal_task:
  extends: .base_config
  rules:
    # skip release branches
    - if: '$CI_COMMIT_BRANCH =~ /^release\/.+$/'
      when: never
    # run on develop
    - if: '$CI_COMMIT_BRANCH == "develop"'
      when: on_success
    # run on everything else (features, bugfix, hotfix)
    - when: on_success

# This job runs only on release/*
release_task:
  extends: .base_config
  rules:
    - if: '$CI_COMMIT_BRANCH =~ /^release\/.+$/'
      when: on_success
    - when: never
# Verbose Spring MVC / web auto-configuration logging — debugging only, noisy.
logging.level.org.springframework.web=DEBUG
logging.level.org.springframework.boot.autoconfigure.web=DEBUG
# Include stack traces and exception messages in error responses.
# NOTE(review): leaks implementation details — do not enable in production.
server.error.include-stacktrace=always
server.error.include-message=always
<configuration>
  <!-- Prefer explicit LOG_HOST, then HOSTNAME, then Spring's detected hostname, else 'unknown'.
       Logback's default-value separator is ":-"; the original mixed ":" and ":-",
       which logback does not resolve as a fallback. -->
  <property name="LOG_HOST" value="${LOG_HOST:-${HOSTNAME:-${spring.cloud.client.hostname:-unknown}}}"/>
  <appender name="splunk" class="com.splunk.logging.HttpEventCollectorLogbackAppender">
    <!-- Env vars win; *.hec.* properties act as fallbacks; port defaults to 8088 -->
    <url>https://${SPLUNK_HOST:-${splunk.hec.host}}:${SPLUNK_PORT:-${splunk.hec.port:-8088}}/services/collector</url>
    <token>${SPLUNK_TOKEN:-${splunk.hec.token}}</token>
    <index>${SPLUNK_INDEX:-${splunk.hec.index:-main}}</index>
    <source>${APP_NAME:-${spring.application.name:-my-app}}</source>
    <sourcetype>_json</sourcetype>
    <host>${LOG_HOST}</host>
  </appender>
  <root level="INFO">
    <appender-ref ref="splunk"/>
  </root>
</configuration>
# Hotfix retag/deploy pair. The dotenv artifact report re-exports APP_VERSION
# so the deploy job sees the "-hotfix1" value.
# (Original lost its YAML indentation in extraction; restored here.)
retag:
  stage: build
  needs:
    - job: build
      artifacts: true
  script:
    - echo "APP_VERSION=${APP_VERSION}-hotfix1" > exported.env
  artifacts:
    reports:
      dotenv: exported.env

deploy-hotfix:
  stage: deploy
  needs:
    - job: retag
      artifacts: true   # pulls the dotenv report, so APP_VERSION is the retagged value
  script:
    - echo "Deploying hotfix: $APP_VERSION"
# Task-list fragment: download the newest RPM for pkg_name and install it
# without running its scriptlets.
# (Original lost its indentation and had a bare `- vars:` list item, which is
# not a valid task; wrapped in a block so the vars scope the whole sequence.)
- vars:
    pkg_name: myapp
    download_dir: /tmp
  block:
    - name: Ensure dnf download tool
      become: true
      ansible.builtin.package:
        name: dnf-plugins-core
        state: present

    - name: Download the RPM file
      become: true
      ansible.builtin.command: >
        dnf download -y --arch={{ ansible_architecture }}
        --destdir={{ download_dir }} {{ pkg_name }}
      register: dl_cmd
      changed_when: true

    - name: Find the downloaded RPM path
      ansible.builtin.find:
        paths: "{{ download_dir }}"
        patterns: "{{ pkg_name }}-*.rpm"
        file_type: file
      register: rpms

    # Newest by mtime wins when several versions are present in download_dir
    - name: Pick newest RPM
      ansible.builtin.set_fact:
        rpm_file: "{{ (rpms.files | sort(attribute='mtime', reverse=true))[0].path }}"

    - name: Install without scripts
      become: true
      ansible.builtin.command: rpm -ivh --noscripts "{{ rpm_file }}"
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import java.security.SecureRandom
import javax.net.ssl.SSLContext
// Smoke test: fetch an OAuth token once over a custom-SSL connection and print it.
class MySimulation extends Simulation {
// Client SSL context built from a JKS keystore (CustomSSLContext is project code)
val sslContext: SSLContext = CustomSSLContext.createSSLContext(
"/path/to/your.jks",
"your-password"
)
val httpProtocol = http
.baseUrl("https://your-api-host.com")
.disableWarmUp // skip Gatling's warm-up request
.disableCaching // force every request to actually hit the server
.sslContext(sslContext)
val scn = scenario("Get Token Scenario")
.exec(
http("Get Token")
.post("/auth/token")
.header("Content-Type", "application/json")
.body(StringBody("""{ "client_id": "xxx", "client_secret": "yyy" }"""))
.check(jsonPath("$.access_token").saveAs("accessToken"))
)
// print the captured token for manual verification
.exec(session => {
println("Access Token: " + session("accessToken").as[String])
session
})
setUp(scn.inject(atOnceUsers(1))).protocols(httpProtocol)
}
#!/usr/bin/env bash
set -euo pipefail

# check_pair USER GROUP
# Returns 0 when USER and GROUP both exist and USER belongs to GROUP
# (primary or supplementary); 1 when either is missing; 2 when the user
# is not a member of the group.
check_pair() {
  local user="$1" group="$2"
  getent passwd "$user" >/dev/null || return 1
  getent group "$group" >/dev/null || return 1
  local primary_gid group_gid
  primary_gid="$(id -g "$user")"
  group_gid="$(getent group "$group" | cut -d: -f3)"
  # primary group match?
  [[ "$primary_gid" == "$group_gid" ]] && return 0
  # supplementary group match?
  id -nG "$user" | tr ' ' '\n' | grep -qx "$group" && return 0
  return 2
}
# Candidate user:group pairs, highest priority first.
pairs=(
"user_a:user_a_group"
"user_b:user_b_group"
"user_c:user_c_group"
)
SELECTED_USER=""
SELECTED_GROUP=""
# Take the first pair that exists and is correctly linked.
for pair in "${pairs[@]}"; do
  user="${pair%%:*}"
  group="${pair#*:}"
  if check_pair "$user" "$group"; then
    SELECTED_USER="$user"
    SELECTED_GROUP="$group"
    break
  fi
done
if [[ -z "$SELECTED_USER" ]]; then
  echo "WARNING: none of the expected user:group pairs exist or user not in group" >&2
  # Choose exit code to suit your installer:
  # exit 1 # fail hard (e.g., in %pre)
  exit 0 # or continue with a warning
fi
echo "Using ${SELECTED_USER}:${SELECTED_GROUP}"
# Example action:
# chown -R "${SELECTED_USER}:${SELECTED_GROUP}" /opt/myapp
exit 0
# Upsert KEY=VALUE into an .env file without touching other keys.
# - Replaces the first active occurrence of KEY (handles optional "export" and spaces).
# - Drops any duplicate KEY lines.
# - Appends KEY if not present.
# - Leaves comments and unrelated lines exactly as-is.
# NOTE(review): the key is interpolated into an awk regex, so keys containing
# regex metacharacters (e.g. ".") may over-match — confirm keys are plain
# identifiers. Also, mktemp may create the temp file on a different filesystem
# than $file, in which case the final mv is not atomic.
upsert_env() {
key="$1"; val="$2"; file="$3"
[ -n "$key" ] && [ -n "$file" ] || { echo "upsert_env: missing args" >&2; return 1; }
umask 022
mkdir -p "$(dirname "$file")"
touch "$file"
# keep a one-generation backup; ignore failure (e.g. read-only backup target)
cp -p "$file" "${file}.bak" 2>/dev/null || true
tmp="$(mktemp)"
awk -v k="$key" -v v="$val" '
BEGIN { done=0 }
# Keep commented lines untouched
/^[[:space:]]*#/ { print; next }
# Match: optional spaces, optional "export", KEY, optional spaces, "="
$0 ~ "^[[:space:]]*(export[[:space:]]+)?(" k ")[[:space:]]*=" {
if (!done) { print k "=" v; done=1 } # overwrite first match
next # drop duplicates
}
{ print } # pass-through others
END { if (!done) print k "=" v } # append if never seen
' "$file" > "$tmp" && mv "$tmp" "$file"
# Reasonable perms for systemd EnvironmentFile
chmod 0640 "$file" 2>/dev/null || chmod 0644 "$file"
}
# Target .env consumed by the app / systemd EnvironmentFile
ENV_FILE="/etc/myapp/.env"
# assume SELECTED_USER / SELECTED_GROUP are already determined
if [ -n "${SELECTED_USER:-}" ] && [ -n "${SELECTED_GROUP:-}" ]; then
upsert_env APP_USER "$SELECTED_USER" "$ENV_FILE"
upsert_env APP_GROUP "$SELECTED_GROUP" "$ENV_FILE"
echo "Updated $ENV_FILE (APP_USER, APP_GROUP)."
# If systemd reads this file, reload/restart as needed:
# systemctl daemon-reload
# systemctl try-restart myapp.service
else
echo "WARNING: no valid user:group selected; not updating $ENV_FILE" >&2
fi
# Example: safe construction of the drop-in dir
set -eu
service_name="${service_name:-myapp}" # can be "myapp" or "myapp.service"
unit_base="${service_name%.service}" # strip .service if present
unit="${unit_base}.service"
dropin_dir="/etc/systemd/system/${unit}.d"
# install -d is mkdir -p with an explicit mode
sudo install -d -m 0755 "$dropin_dir"
# unquoted EOF on purpose: $MYAPP_USER / $MYAPP_GROUP expand NOW, at write time
sudo tee "${dropin_dir}/10-identity.conf" >/dev/null <<EOF
[Service]
User=$MYAPP_USER
Group=$MYAPP_GROUP
EOF
sudo systemctl daemon-reload
sudo systemctl try-restart "$unit" || true
package performance
import com.typesafe.config.ConfigFactory
import com.example.util.JwtGenerator
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
// Perf test: signs a JWT from a JKS key and posts a search query with it.
class SearchSimulation extends Simulation {
// Choose env: fallback to "env-dev.conf" or override via system property
// (e.g. -Denv=staging loads env-staging.conf from the classpath)
val envName = System.getProperty("env", "dev")
val config = ConfigFactory.load(s"env-$envName.conf")
// keystore settings used by JwtGenerator (project code) to sign the token
val jksPath = config.getString("jks.path")
val jksPassword = config.getString("jks.password")
val jksAlias = config.getString("jks.alias")
val baseUrl = config.getString("api.baseUrl")
// NOTE(review): token is generated ONCE at class-init; long runs may outlive
// its expiry — confirm the token TTL covers the test duration.
val jwtToken = JwtGenerator.generateToken(jksPath, jksPassword, jksAlias)
val httpProtocol = http
.baseUrl(baseUrl)
.acceptHeader("application/json")
.contentTypeHeader("application/json")
val scn = scenario("JWT Search")
.exec(
http("Search request")
.post("/api/search")
.header("Authorization", s"Bearer $jwtToken")
.body(StringBody("""{ "query": "gatling perf test" }"""))
.asJson
.check(status.is(200))
)
// 10 users ramped linearly over 10 seconds
setUp(
scn.inject(rampUsers(10).during(10.seconds))
).protocols(httpProtocol)
}
<configuration>
<!-- Splunk HEC appender (synchronous). NOTE(review): the HEC token is
     hard-coded — move it to an env var / secret store before committing. -->
<appender name="SPLUNK" class="com.splunk.logging.HttpEventCollectorLogbackAppender">
<!-- Replace with your HEC endpoint -->
<url>https://your-splunk-host:8088</url>
<token>YOUR_HEC_TOKEN</token>
<index>main</index>
<source>spring-boot</source>
<sourcetype>_json</sourcetype>
<!-- NOTE(review): disables TLS certificate checks — acceptable in a lab,
     never in production. -->
<disableCertificateValidation>true</disableCertificateValidation>
<layout class="ch.qos.logback.classic.PatternLayout">
<Pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ} %-5level %logger{36} - %msg%n</Pattern>
</layout>
</appender>
<root level="INFO">
<appender-ref ref="SPLUNK" />
</root>
</configuration>
<!-- Optional async wrapper: buffers events so logging doesn't block request
     threads. This fragment belongs INSIDE a <configuration> element, and the
     root logger should then reference ASYNC_SPLUNK instead of SPLUNK. -->
<appender name="ASYNC_SPLUNK" class="ch.qos.logback.classic.AsyncAppender">
<appender-ref ref="SPLUNK" />
</appender>
<root level="INFO">
<appender-ref ref="ASYNC_SPLUNK" />
</root>
<!-- Env-driven variant: scan="true" lets logback re-read this file at runtime. -->
<configuration scan="true">
<appender name="SPLUNK" class="com.splunk.logging.HttpEventCollectorLogbackAppender">
<!-- SPLUNK_HEC_URL / SPLUNK_HEC_TOKEN have no defaults: they must be set as
     environment variables or system properties. -->
<url>${SPLUNK_HEC_URL}</url>
<token>${SPLUNK_HEC_TOKEN}</token>
<index>${SPLUNK_HEC_INDEX:-main}</index>
<source>spring-boot</source>
<sourcetype>_json</sourcetype>
<!-- NOTE(review): cert validation defaults to OFF here — a safer default
     would be false (i.e. validation on) with an explicit opt-out. -->
<disableCertificateValidation>${SPLUNK_DISABLE_CERT_VALIDATION:-true}</disableCertificateValidation>
<layout class="ch.qos.logback.classic.PatternLayout">
<Pattern>%d{yyyy-MM-dd'T'HH:mm:ss.SSSZ} %-5level %logger{36} - %msg%n</Pattern>
</layout>
</appender>
<root level="INFO">
<appender-ref ref="SPLUNK" />
</root>
</configuration>
// Gatling + Karate perf-test build (Gradle Gatling plugin).
plugins {
id 'java'
id 'io.gatling.gradle' version '3.9.5.2'
}
// Compile and run with a Java 17 toolchain
java {
toolchain {
languageVersion = JavaLanguageVersion.of(17)
}
}
dependencies {
// Karate engine + its Gatling bridge; the two karate versions must match
gatlingImplementation 'com.intuit.karate:karate-core:1.4.1'
gatlingImplementation 'com.intuit.karate:karate-gatling:1.4.1'
// Gatling runtime/report stack, matching the plugin's 3.9.5 line
gatlingImplementation 'io.gatling.highcharts:gatling-charts-highcharts:3.9.5'
gatlingImplementation 'io.gatling:gatling-app:3.9.5'
}
# Smoke-test the Splunk HEC endpoint with a single event.
# -k skips TLS certificate verification — matches the lab config above; drop
# it once a trusted certificate is installed.
curl -k -H "Authorization: Splunk YOUR_HEC_TOKEN" \
-H "Content-Type: application/json" \
-d '{"event": "test-event"}' \
https://your-splunk-host:8088/services/collector
#!/bin/bash
# Sub-command stubs for the .env helper CLI (dispatcher below); replace the
# echo placeholders with real logic.
check_env_variables() {
echo "Running check_env_variables..."
# your logic here
}
update_env_variable() {
echo "Running update_env_variable..."
# your logic here
}
load_env_and_print_db_host() {
echo "Running load_env_and_print_db_host..."
# your logic here
}
# Dispatch on the first CLI argument. Defaults to "help" when no argument is
# given (the original fell through to `*)` and printed "Unknown command:"
# with an empty name).
case "${1:-help}" in
  check)
    shift
    check_env_variables "$@"
    ;;
  update)
    shift
    update_env_variable "$@"
    ;;
  load)
    shift
    load_env_and_print_db_host "$@"
    ;;
  help|--help|-h)
    cat <<EOF
📘 Available Commands:
check <env_file> <mandatory_keys...>
- Check required keys in the .env file
update <env_file> <key> <value>
- Update or insert a key=value pair in the .env file
load <env_file>
- Load the .env file and print DB_HOST and DB_PORT
help
- Show this help message
EOF
    ;;
  *)
    echo "❌ Unknown command: $1"
    echo "Use: $0 help"
    exit 1
    ;;
esac
#!/bin/bash
# Derive MY_SYS_VAR from this machine's hostname.
HOST=$(hostname)
case "$HOST" in
  dev-server)
    export MY_SYS_VAR="dev_value"
    ;;
  prod-server)
    export MY_SYS_VAR="prod_value"
    ;;
  *)
    # any other host falls back to the default
    export MY_SYS_VAR="default_value"
    ;;
esac
echo "MY_SYS_VAR is set to: $MY_SYS_VAR"
-Xms2g # Initial heap size; set according to the actual workload
-Xmx2g # Maximum heap; equal to -Xms to avoid dynamic resizing
-XX:+UseG1GC # Use the G1 garbage collector (the JDK 21 default)
-XX:MaxGCPauseMillis=100 # Target maximum GC pause time (tuning parameter)
-XX:+ParallelRefProcEnabled # Speed up reference processing during GC
-XX:+UnlockExperimentalVMOptions
-XX:+DisableExplicitGC # Ignore explicit System.gc() calls
-XX:+AlwaysPreTouch # Pre-touch physical memory at startup to reduce jitter
-XX:+PerfDisableSharedMem # Disable shared PerfData memory (less overhead in some setups)
-Djava.net.preferIPv4Stack=true # Avoid IPv6-related DNS latency in some environments
-Dfile.encoding=UTF-8 # Pin the charset explicitly
-Dspring.profiles.active=prod # Explicitly use the production profile
# Resolve a JDK 21 install directory and exec the app (replaces the shell)
export JAVA_HOME=$(ls -d /usr/lib/jvm/java-21-openjdk-* | head -n 1)
exec $JAVA_HOME/bin/java -jar /app/myapp.jar
# Unified GC logging (JDK 9+): rotate across 10 files of 10M each
-Xlog:gc*:file=/var/log/myapp/gc.log:time,level,tags:filecount=10,filesize=10M
# JVM option bundle consumed by the service launcher.
# NOTE(review): this set selects ZGC while the block above uses G1 — choose
# one collector per deployment.
# SECURITY NOTE(review): JMX remote is enabled on port 9010 with NO
# authentication and NO SSL; the RMI hostname pin (127.0.0.1) helps, but keep
# this port firewalled / localhost-only.
JAVA_OPTS="\
-Xms1g -Xmx2g \
-XX:+UseZGC \
-XX:+TieredCompilation \
-XX:+AlwaysPreTouch \
-XX:+UseStringDeduplication \
-XX:+ExitOnOutOfMemoryError \
-XX:MaxMetaspaceSize=256m \
-Dspring.profiles.active=prod \
-Dlogging.level.root=WARN \
-Dcom.sun.management.jmxremote \
-Dcom.sun.management.jmxremote.port=9010 \
-Dcom.sun.management.jmxremote.authenticate=false \
-Dcom.sun.management.jmxremote.ssl=false \
-Djava.rmi.server.hostname=127.0.0.1"
# Print log lines whose timestamps fall between 10:00 and 12:00 on 2024-10-31.
# Uses plain string comparison of the WHOLE line, so it assumes every line
# begins with "YYYY-MM-DD HH:MM:SS" — TODO confirm the log format. Note the
# upper bound effectively excludes lines at exactly 12:00:00, because the line
# text continues past the timestamp and therefore compares greater.
awk '$0 >= "2024-10-31 10:00:00" && $0 <= "2024-10-31 12:00:00"' /var/log/myapp.log
<configuration>
  <!-- Console pattern that always prints the MDC traceId (blank when unset). -->
  <property name="LOG_PATTERN" value="%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - [traceId=%X{traceId}] %msg%n"/>
  <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
    <!-- An appender takes exactly ONE encoder; the original stacked three
         alternative <encoder> elements plus a stray <pattern>, which is not
         valid logback configuration. Keep the LOG_PATTERN-based encoder. -->
    <encoder>
      <pattern>${LOG_PATTERN}</pattern>
    </encoder>
  </appender>
  <root level="INFO">
    <appender-ref ref="CONSOLE"/>
  </root>
</configuration>
import com.fasterxml.jackson.core.*;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.InputStream;
import java.util.*;
/**
 * Streaming extractor for the "Results[*].metadata" section of an HCI-style
 * search response. Uses Jackson's streaming API (JsonParser), so the full
 * document is never materialized in memory.
 */
public class HciMetadataParser {
// Thread-safe; used here only as a JsonFactory source
private static final ObjectMapper objectMapper = new ObjectMapper();
/**
 * Parses the document and returns one flat map per "Results" entry.
 * Each metadata field is expected to hold an array; only its FIRST scalar
 * element is kept (stringified) and the rest of the array is skipped.
 * Non-array metadata values and non-"metadata" result fields are ignored.
 *
 * NOTE(review): the "skip rest of array" loop assumes the remaining array
 * elements are scalars — a nested array/object as the first element would
 * stop the loop at the inner END_ARRAY and desynchronize the parse. Confirm
 * metadata arrays only contain scalar values.
 *
 * @param jsonInputStream the response body; presumably closed when the
 *                        parser closes (Jackson's AUTO_CLOSE_SOURCE default)
 *                        — TODO confirm
 * @return flat metadata maps, one per result (possibly empty)
 * @throws Exception on malformed JSON or when the root is not an object
 */
public static List<Map<String, String>> extractMetadataList(InputStream jsonInputStream) throws Exception {
JsonFactory factory = objectMapper.getFactory();
List<Map<String, String>> metadataList = new ArrayList<>();
try (JsonParser parser = factory.createParser(jsonInputStream)) {
if (parser.nextToken() != JsonToken.START_OBJECT) {
throw new IllegalStateException("Expected JSON root to be an object");
}
// Walk the top-level object's fields
while (parser.nextToken() != JsonToken.END_OBJECT) {
String fieldName = parser.getCurrentName();
parser.nextToken(); // advance to this field's value token
if ("Results".equals(fieldName) && parser.currentToken() == JsonToken.START_ARRAY) {
// Process each Result object
while (parser.nextToken() != JsonToken.END_ARRAY) {
Map<String, String> flatMetadata = new HashMap<>();
// Walk the fields of one result object
while (parser.nextToken() != JsonToken.END_OBJECT) {
String resultField = parser.getCurrentName();
parser.nextToken();
if ("metadata".equals(resultField) && parser.currentToken() == JsonToken.START_OBJECT) {
// Flatten the metadata object: field -> first array element
while (parser.nextToken() != JsonToken.END_OBJECT) {
String metaField = parser.getCurrentName();
parser.nextToken();
// Expecting START_ARRAY
if (parser.currentToken() == JsonToken.START_ARRAY) {
JsonToken token = parser.nextToken();
if (token == JsonToken.VALUE_STRING || token == JsonToken.VALUE_NUMBER_INT ||
token == JsonToken.VALUE_NUMBER_FLOAT || token == JsonToken.VALUE_TRUE ||
token == JsonToken.VALUE_FALSE) {
flatMetadata.put(metaField, parser.getValueAsString());
}
// Skip rest of array
while (parser.nextToken() != JsonToken.END_ARRAY) {
// no-op
}
} else {
parser.skipChildren(); // non-array metadata value: ignore
}
}
} else {
parser.skipChildren(); // ignore other result fields
}
}
metadataList.add(flatMetadata);
}
} else {
parser.skipChildren(); // Skip "IndexName", "HitCount" etc.
}
}
}
return metadataList;
}
}
import org.slf4j.MDC;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.UUID;

/**
 * Spring MVC interceptor that stamps every request with a random trace id in
 * the SLF4J MDC (key {@code "traceId"}), so log patterns using
 * {@code %X&#123;traceId&#125;} can correlate all lines of one request.
 */
@Component
public class TraceIdInterceptor implements HandlerInterceptor {

    private static final String TRACE_ID = "traceId";

    /** Seed the MDC with a fresh random trace id before the handler runs. */
    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        MDC.put(TRACE_ID, UUID.randomUUID().toString());
        return true;
    }

    /** Always clear the entry so pooled worker threads never reuse a stale id. */
    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response,
                                Object handler, Exception ex) {
        MDC.remove(TRACE_ID);
    }
}
// Pre-fetch a pool of 20 tokens up front (fetchTokenFromApi defined elsewhere)
val tokens = (1 to 20).map(_ => fetchTokenFromApi())
// Infinite feeder: each virtual user draws a random token from the pool
val tokenFeeder = Iterator.continually {
Map("accessToken" -> tokens(Random.nextInt(tokens.length)))
}
val scn = scenario("Load Test Search")
.feed(tokenFeeder)
.exec(
http("Search")
.get("/api/search")
// "${accessToken}" is Gatling EL, resolved per session from the feeder
.header("Authorization", "Bearer ${accessToken}")
)
// 20 users immediately, then 80 more ramped over one minute
setUp(
scn.inject(atOnceUsers(20), rampUsers(80).during(1.minute))
).protocols(httpProtocol)
# Fetch the server's certificate chain (with SNI) and print the leaf cert's
# details, including its subjectAltName extension.
openssl s_client -connect xxx.test.net:443 -servername xxx.test.net -showcerts </dev/null \
| openssl x509 -noout -text -ext subjectAltName
# CI job wrapping the AI-review script above; runs only in MR pipelines.
# (Original lost its YAML indentation in extraction; restored here.)
ai-review:
  image: python:3.12
  stage: ai_review
  before_script:
    - pip install requests
  script:
    - python .gitlab/ai_review_azure.py
  rules:
    # only when the pipeline belongs to a merge request
    - if: $CI_MERGE_REQUEST_IID
  variables:
    AZURE_OPENAI_ENDPOINT: $AZURE_OPENAI_ENDPOINT
    AZURE_OPENAI_API_KEY: $AZURE_OPENAI_API_KEY
    AZURE_OPENAI_DEPLOYMENT: $AZURE_OPENAI_DEPLOYMENT
    AZURE_OPENAI_API_VERSION: $AZURE_OPENAI_API_VERSION
    GITLAB_TOKEN: $GITLAB_TOKEN
%post
echo "Running post-install script..."
# Always enable the service to start at boot
systemctl enable my.service
# Only start the service if MY_ENV_VAR is set
# NOTE(review): rpm scriptlets run with a minimal environment, so MY_ENV_VAR
# will normally be unset unless the invoking process exports it — confirm
# this switch actually fires in your install flow.
if [ -n "$MY_ENV_VAR" ]; then
echo "MY_ENV_VAR is set to '$MY_ENV_VAR'. Starting my.service..."
systemctl start my.service
else
echo "MY_ENV_VAR is not set, skipping service start."
echo "You can manually set MY_ENV_VAR and run 'systemctl start my.service' later."
fi
import io.gatling.core.Predef._
import io.gatling.http.Predef._
// Header used to propagate a per-request trace id to the server
val TraceHeader = "X-Trace-Id"
// helper to bump a per-user counter and build a trace id
def nextTrace: ChainBuilder =
exec { s =>
// "seq" is a per-session counter; absent on first use, hence the 0L default
val n = s("seq").asOption[Long].getOrElse(0L) + 1
s.setAll("seq" -> n, "traceId" -> s"${s.userId}-$n")
}
val scn = scenario("print-trace-when-non200")
.exec(session => session.set("seq", 0L)) // init
.exec(nextTrace)
.exec(
http("GET /foo")
.get("/foo")
.header(TraceHeader, "${traceId}") // send trace id
.check(status.saveAs("st")) // capture status
)
// only log when the response status is not 200
.doIf(session => session("st").as[Int] != 200) {
exec { s =>
println(s"[NON-200] status=${s("st").as[Int]} trace=${s("traceId").as[String]}")
s
}
}
import org.apache.hc.client5.http.classic.CloseableHttpClient;
import org.apache.hc.client5.http.classic.HttpClients;
import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManager;
import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManagerBuilder;
import org.apache.hc.client5.http.ssl.SSLConnectionSocketFactoryBuilder;
import org.apache.hc.core5.ssl.SSLContexts;
import javax.net.ssl.HostnameVerifier;
import java.io.FileInputStream;
import java.security.KeyStore;
public final class MtlsNoHostnameCheckClient {
// A logging HostnameVerifier to prove it's actually being used
static final HostnameVerifier ALWAYS_TRUE_LOGGING_VERIFIER = (hostname, session) -> {
System.out.println("[HostnameVerifier] called for host: " + hostname);
return true; // <— ignore SAN/hostname
};
public static CloseableHttpClient create(
String keyJksPath, char[] keyStorePassword, char[] keyPassword,
String trustJksPath, char[] trustPassword
) throws Exception {
KeyStore keyStore = KeyStore.getInstance("JKS");
try (FileInputStream fis = new FileInputStream(keyJksPath)) {
keyStore.load(fis, keyStorePassword);
}
KeyStore trustStore = KeyStore.getInstance("JKS");
try (FileInputStream fis = new FileInputStream(trustJksPath)) {
trustStore.load(fis, trustPassword);
}
var sslContext = SSLContexts.custom()
.loadKeyMaterial(keyStore, keyPassword) // mTLS client cert
.loadTrustMaterial(trustStore, null) // normal trust; for testing you can use (chain,auth)->true
.build();
var sslsf = SSLConnectionSocketFactoryBuilder.create()
.setSslContext(sslContext)
.setHostnameVerifier(ALWAYS_TRUE_LOGGING_VERIFIER) // <— disable SAN/hostname check
.build();
PoolingHttpClientConnectionManager cm = PoolingHttpClientConnectionManagerBuilder.create()
.setSSLSocketFactory(sslsf)
.build();
return HttpClients.custom()
.setConnectionManager(cm)
.build();
}
}
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.stereotype.Service;
import java.io.OutputStream;
import java.nio.file.*;

/**
 * File-backed JSON store. Writes are staged to a temp file in the target
 * directory and then moved into place, so concurrent readers never observe
 * a partially written document.
 */
@Service
public class JsonFileStore {

    private final ObjectMapper mapper;

    public JsonFileStore(ObjectMapper mapper) {
        this.mapper = mapper; // Spring Boot auto-configures this
    }

    /**
     * Serializes {@code data} as pretty-printed JSON into {@code targetPath},
     * replacing any existing file.
     *
     * @param targetPath destination file (parent directories are created)
     * @param data       any Jackson-serializable object
     * @throws Exception on serialization or filesystem failures
     */
    public void writeJson(Path targetPath, Object data) throws Exception {
        // Work on the absolute path: getParent() of a bare file name like
        // Paths.get("data.json") is null, which NPE'd in the original.
        Path target = targetPath.toAbsolutePath();
        Path dir = target.getParent();
        Files.createDirectories(dir);
        // Stage in the SAME directory so the final move stays on one
        // filesystem, keeping ATOMIC_MOVE viable.
        Path tmp = Files.createTempFile(dir, ".tmp-", ".json");
        try {
            try (OutputStream out = Files.newOutputStream(tmp, StandardOpenOption.TRUNCATE_EXISTING)) {
                mapper.writerWithDefaultPrettyPrinter().writeValue(out, data);
            }
            // Move into place (best-effort atomic on same filesystem)
            try {
                Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
            } catch (AtomicMoveNotSupportedException e) {
                Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING);
            }
        } finally {
            // Don't leave an orphaned temp file behind when writing/moving failed.
            Files.deleteIfExists(tmp);
        }
    }

    /** Reads the JSON document at {@code path} and binds it to {@code type}. */
    public <T> T readJson(Path path, Class<T> type) throws Exception {
        return mapper.readValue(path.toFile(), type);
    }
}
// Run the blocking HCI call on a virtual thread (Java 21+). Closing the
// executor at the end of the try block waits for the submitted task.
try (var vtp = java.util.concurrent.Executors.newVirtualThreadPerTaskExecutor()) {
HciData result = vtp.submit(() -> {
// executeOpen returns a response whose entity stream must be closed — hence try-with-resources
try (ClassicHttpResponse res = httpClient.executeOpen(null, post, HttpClientContext.create())) {
int code = res.getCode();
if (code < 200 || code >= 300) {
// surface non-2xx as HttpResponseException carrying the error body
String errBody = res.getEntity() != null ? EntityUtils.toString(res.getEntity()) : "";
throw new org.apache.hc.client5.http.HttpResponseException(code, errBody);
}
if (res.getEntity() == null) {
throw new IllegalStateException("Empty HTTP entity from HCI");
}
// stream the body straight into Jackson without buffering the whole payload
try (var in = res.getEntity().getContent()) {
return objectMapper.readValue(in, HciData.class);
}
}
}).get(); // NOTE(review): get() wraps task failures in ExecutionException and can throw InterruptedException — handle or declare upstream
// use result
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment