#!/bin/bash
# allservice.sh: given a host/port file, collect INFO, key names, key lengths,
# and TTLs from every Redis instance listed in the file
# set -e
zone=smf1                          # Aurora cluster/zone the jobs run in
port_name=redis_0                  # name of the Redis port assigned by Aurora
nInstance=20                       # number of instances to query per job
host_port_file=host_port2          # file listing one instance per line
rcli=$HOME/redis32/src/redis-cli   # redis-cli binary used for INFO/SCAN
# rm -r info keys log 2>/dev/null
mkdir info keys log per_db 2>/dev/null
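# Each line of ${host_port_file} is expected to look like the example below
# (the hostname and port are made-up placeholders, not a real instance):
#   <host> <port> <aurora_job_path> [<service_shortname>]
#   smf1-example-01 31337 smf1/nighthawk_revenue_backend/prod/nighthawk_haplolite_prod_backend_a nonfanout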
# Fetch the aurora client into $HOME via packer (used as $HOME/aurora below).
function prepare() {
    cd "$HOME"
    packer fetch --cluster=smf1 aurora aurora live
}
# Enumerate every prod "_a" job under the nighthawk_revenue_backend account and
# append "host port job" for the first ${nInstance} instances of each job to ${host_port_file}.
function get_host_port_for_all_services() {
    service_account=nighthawk_revenue_backend
    jobListStr=$($HOME/aurora job list ${zone}/${service_account}/prod | grep prod | grep _a)
    declare -a jobList=($jobListStr)
    for job in "${jobList[@]}"; do
        jobInfo=$($HOME/aurora job status ${job} --write-json)
        for idx in $(seq 0 $((nInstance-1))); do
            host=$(echo $jobInfo | jq -r ".[0].active[${idx}].assignedTask.slaveHost")
            port=$(echo $jobInfo | jq ".[0].active[${idx}].assignedTask.assignedPorts.${port_name}")
            echo "$host $port"
            echo $host $port $job >> ${host_port_file}
        done
    done
}
# Same as above, but for a single service: append "host port job shortname"
# for the first ${nInstance} instances of the matching prod job(s).
function get_host_port_for_one_service() {
    service_account=$1
    service=$2
    service_shortname=$3
    jobListStr=$($HOME/aurora job list ${zone}/${service_account}/prod | grep prod | grep ${service})
    declare -a jobList=($jobListStr)
    for job in "${jobList[@]}"; do
        jobInfo=$($HOME/aurora job status ${job} --write-json)
        for idx in $(seq 0 $((nInstance-1))); do
            host=$(echo $jobInfo | jq -r ".[0].active[${idx}].assignedTask.slaveHost")
            port=$(echo $jobInfo | jq ".[0].active[${idx}].assignedTask.assignedPorts.${port_name}")
            echo "$host $port"
            echo $host $port $job ${service_shortname} >> ${host_port_file}
        done
    done
}
# Dump INFO, key names, key lengths (LLEN), and TTLs for one Redis instance.
# Per-db files go to per_db/, concatenated per-instance files go to info/.
function collect() {
    service_name=$1
    host=$2
    port=$3
    cnt=20000000          # SCAN COUNT hint, intended to cover a whole db in one pass
    prefix=${service_name}.${host}.${port}
    rm info/${prefix}.* 2>/dev/null
    # Snapshot INFO; its db<N> keyspace lines tell us which databases exist.
    echo -e "info" | ${rcli} -h $host -p $port --no-raw > info/${prefix}.info
    for db in $(seq 1 320); do
        echo ${host} db${db}
        # Stop at the first database that does not appear in the INFO keyspace section.
        if ! grep -q "db${db}:" info/${prefix}.info; then
            break
        fi
        # Dump all key names in this db with a single SCAN; strip the SELECT reply
        # and the trailing cursor line.
        echo -e "select ${db}\nscan 0 count ${cnt}" | ${rcli} -h $host -p $port --no-raw > ${prefix}.${db}.keys.tmp
        tail -n +3 ${prefix}.${db}.keys.tmp | head -n -1 > ${prefix}.${db}.keys
        # getInfo.py gen_op turns the key list into raw LLEN/TTL command streams
        # (*.llen.input / *.ttl.input), which are piped to the instance over nc.
        python3 getInfo.py --func gen_op --path ${prefix}.${db}.keys --db ${db} --op llen
        python3 getInfo.py --func gen_op --path ${prefix}.${db}.keys --db ${db} --op ttl
        cat ${prefix}.${db}.llen.input | nc $host $port > ${prefix}.${db}.keylen.tmp
        tail -n +2 ${prefix}.${db}.keylen.tmp > ${prefix}.${db}.keylen
        sleep 1
        cat ${prefix}.${db}.ttl.input | nc $host $port > ${prefix}.${db}.ttl.tmp
        tail -n +2 ${prefix}.${db}.ttl.tmp > ${prefix}.${db}.ttl
        rm *.tmp 2>/dev/null
        # Aggregate per-db results into per-instance files, keep per-db copies.
        cat ${prefix}.${db}.keys   >> info/${prefix}.keys
        cat ${prefix}.${db}.keylen >> info/${prefix}.keylen
        cat ${prefix}.${db}.ttl    >> info/${prefix}.ttl
        mv ${prefix}.${db}.* per_db/
        # rm ${prefix}.${db}*
        sleep 2
    done
    # if ! ( screen -ls | grep monitor.${host}.${port} > /dev/null); then
    #     screen -dmS monitor.${host}.${port};
    # fi
    # screen -S monitor.${host}.${port} -X stuff $"${rcli} -h $host -p $port monitor >> log/${service_name}.${host}.${port}.log\n"
}
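# A single instance can also be collected by hand, e.g. (host and port below
# are placeholders, not a real instance):
#   collect nonfanout smf1-example-01 31337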
# Read ${host_port_file} and run collect() for every instance listed in it.
function collect_all() {
    while read -r line; do
        host=$(echo $line | cut -d " " -f1)
        port=$(echo $line | cut -d " " -f2)
        job_path=$(echo $line | cut -d " " -f3)
        # Short name derived from the job path (unused when column 4 is present).
        service_name=$(echo $job_path | cut -d / -f4 | sed -e "s/nighthawk_//g" -e "s/revenue_//g")
        service_name2=$(echo $line | cut -d " " -f4)
        echo '*****************************************'
        collect "${service_name2}" "${host}" "${port}"
    done < ${host_port_file}
}
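# To (re)build ${host_port_file}, uncomment one of the get_host_port_* calls
# below before running collect_all. For each instance, collect() leaves:
#   info/<name>.<host>.<port>.info     full INFO dump
#   info/<name>.<host>.<port>.keys     key names across all dbs
#   info/<name>.<host>.<port>.keylen   LLEN result per key
#   info/<name>.<host>.<port>.ttl      TTL result per key
#   per_db/<name>.<host>.<port>.<db>.* the same data split per database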
# get_host_port_for_one_service nighthawk_revenue_backend nighthawk_haplolite_prod_backend_a nonfanout
# get_host_port_for_one_service nighthawk_revenue_backend nighthawk_haplolite_fanout_timelines_backend_a fanout
collect_all
# while true; do logrotate -f rotate.conf -s state & sleep 87600; done
# while true; do
#     for f in logr/*.conf; do
#         logrotate -f $f -s ${f}.state &
#     done
#     sleep 87600;
# done