IOR

IOR stands for "Interleaved or Random". IOR (a coordinated test of parallel I/O) is a parallel file system benchmark developed by the Scalable I/O Project (SIOP) at Lawrence Livermore National Laboratory. IOR is used to test I/O performance at both the parallel POSIX and MPI-IO levels. The parallel program performs writes and reads to files under several sets of conditions and reports the resulting throughput rates.
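For orientation before the job templates below, a minimal standalone IOR invocation looks like this (a sketch; the POSIX API selection, file path, and sizes are illustrative and not part of the template):

# Write (-w) then read (-r) through the POSIX API: 1 MiB transfers (-t)
# against a 1 GiB block (-b) per process.
ior -a POSIX -w -r -t 1m -b 1g -o /tmp/ior-demo.file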

I. Job Submission Parameters

Users can submit IOR jobs through the public template. The IOR-related job parameters are as follows:

Parameter   Description
data_size   Size of the test data set (GiB)
mutifile    I/O test mode: True runs one file per client process (IOR's -F flag); False runs all processes against a single shared file
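The two modes correspond to running IOR without or with its -F (file-per-process) flag; a minimal sketch (process counts and paths illustrative):

# mutifile=False: all ranks read/write one shared file
mpirun -np 8 ior -w -r -t 1m -b 4g -o /mnt/test/shared.file

# mutifile=True: each rank gets its own file (-F)
mpirun -np 8 ior -w -r -t 1m -b 4g -F -o /mnt/test/perproc.file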

II. IOR Job Run Reference

1. Single-file, multi-client test

#!/bin/bash

IOREXE="/opt/ohpc/pub/apps/ior/ior" # Path to the ior executable

# Node count -- not very accurate
NCT=`grep -v ^# hfile |wc -l` # hfile is the hostfile passed to mpirun
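# An example hfile, assuming the usual mpirun hostfile format (one host
# per line; '#' lines are comments and are excluded from the count above):
#   node01
#   node02
#   node03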

# Date Stamp for benchmark
DS=`date +"%F_%H:%M:%S"`
# IOR will be run in a loop, doubling the number of processes per client node
# with every iteration from $SEQ -> $MAXPROCS. If SEQ=1 and MAXPROCS=8, then the
# iterations will be 1, 2, 4, 8 processes per node.
# SEQ and MAXPROCS should be a power of 2 (including 2^0).
SEQ=1       # Starting number of processes per node
MAXPROCS=32 # Maximum number of ior processes per node; must be a power of 2

# Overall data set size in GiB. Must be >=MAXPROCS. Should be a power of 2.
DATA_SIZE=32 # Size of the I/O data set; see constraints above

BASE_DIR=/opt/ohpc/pub/wsm/iorbench-client/  # Directory where the I/O test runs
mkdir -p ${BASE_DIR}

while [ ${SEQ} -le ${MAXPROCS} ]; do
    NPROC=`expr ${NCT} \* ${SEQ}`
    # Pick a reasonable block size, bearing in mind the size of the target file system.
    # Bear in mind that the overall data size will be block size * number of processes.
    # Block size must be a multiple of transfer size (-t option in command line).
    BSZ=`expr ${DATA_SIZE} / ${SEQ}`"g"
    # Alternatively, set to a static value and let the data size increase.
    # BSZ="1g"
    # BSZ="${DATA_SIZE}"
    /opt/ohpc/pub/apps/openmpi-4.1.1/bin/mpirun --allow-run-as-root -np ${NPROC} --map-by node -hostfile ./hfile \
        ${IOREXE} -v -w -r -i 4 \
        -o ${BASE_DIR}/ior-test.file \
        -t 1m -b ${BSZ} \
        -O lustreStripeCount=-1 | tee ${BASE_DIR}/IOR-RW-Single_File-c_${NCT}-s_${SEQ}_${DS}
    SEQ=`expr ${SEQ} \* 2`
done
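Each iteration leaves one log file in ${BASE_DIR}. Assuming IOR's standard summary lines ("Max Write:" / "Max Read:"), the peak rates from the whole sweep can be collected in one pass:

# Pull the peak write/read rates out of every log from this run:
grep -H -e "Max Write" -e "Max Read" ${BASE_DIR}/IOR-RW-Single_File-*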

2. Multi-client, multi-file test

#!/bin/bash
IOREXE="/opt/ohpc/pub/apps/ior/ior"

# Same setup as the single-file test above: node count from hfile,
# date stamp, iteration range, and data set size in GiB.
NCT=`grep -v ^# hfile |wc -l`
DS=`date +"%F_%H:%M:%S"`
SEQ=1
MAXPROCS=32
DATA_SIZE=32
BASE_DIR=/opt/ohpc/pub/wsm/iorbench-file/
mkdir -p ${BASE_DIR}
mkdir -p ${BASE_DIR}/test

while [ ${SEQ} -le ${MAXPROCS} ]; do
    NPROC=`expr ${NCT} \* ${SEQ}`
    BSZ=`expr ${DATA_SIZE} / ${SEQ}`"g"
    # BSZ="1g"
    # BSZ="${DATA_SIZE}"
    # -F switches IOR to file-per-process mode: every rank writes its own file.
    /opt/ohpc/pub/apps/openmpi-4.1.1/bin/mpirun --allow-run-as-root -np ${NPROC} --map-by node -hostfile ./hfile \
        ${IOREXE} -v -w -r -i 4 -F \
        -o ${BASE_DIR}/test/ior-test.file \
        -t 1m -b ${BSZ} \
        | tee ${BASE_DIR}/IOR-RW-Multiple_Files-Common_Dir-c_${NCT}-s_${SEQ}_${DS}
    SEQ=`expr ${SEQ} \* 2`
done
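With -F, each rank writes its own file under ${BASE_DIR}/test; IOR typically derives the per-rank names by appending a numeric suffix to the -o path (e.g. ior-test.file.00000000), so a quick listing confirms one file per process:

# Expect one file per MPI rank after a file-per-process run:
ls ${BASE_DIR}/test/ | head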

3. IOR input file

job_name=ior
run_time=24:00:00
partition=dell_intel
node_num=3
task_per_node=32
data_size=32
mutifile=True
#work_dir=/home/wushiming/hpl
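Note that the execution script below computes the per-process block size as data_size / task_per_node using integer division, so data_size should be at least task_per_node (and ideally a multiple of it). With the values above:

# BSZ = data_size / task_per_node, suffixed with "g":
expr 32 / 32    # -> 1, so IOR runs with -b 1g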

4. Execution script

#!/bin/sh
source /home/wushiming/ior/ior_input

## Check input variables and fall back to defaults where unset
time=`date +%m%d_%H%M%S`

if [ "x$job_name" = "x" ];then
    sbatch_job_name="YHPC_$time"
else
    sbatch_job_name=$job_name
fi

if [ "x$partition" = "x" ];then
    sbatch_partition=""
else
    sbatch_partition=$partition
fi

if [ "x$work_dir" = "x" ];then
    mkdir -p /home/yhpc/YHPC_$time
    sbatch_work_dir=/home/yhpc/YHPC_$time
else
    sbatch_work_dir=$work_dir
fi

if [ "x$run_time" = "x" ];then
    sbatch_run_time=03:00:00
else
    sbatch_run_time=$run_time
fi

if [ "x$mutifile" = "xTrue" ];then
    ior_mutifile="-F"
else
    ior_mutifile=""
fi

sbatch_node_num=$node_num
sbatch_task_per_node=$task_per_node

sbatch_err_log=$sbatch_work_dir/%j.err
sbatch_out_log=$sbatch_work_dir/%j.out

# Per-process block size: with task_per_node ranks per node, each node
# writes data_size GiB in total (integer division)
BSZ=`expr ${data_size} / ${sbatch_task_per_node}`"g"

### Write basic job information
#echo -e "The start time is: `date +"%Y-%m-%d %H:%M:%S"` \n"
#echo -e "My job ID is: $SLURM_JOB_ID \n"
#echo -e "The total cores is: $total_cores \n"
#echo -e "The hosts is: \n"
#srun -np $node_num -nnp 1 hostname
cat > $sbatch_work_dir/ior.slurm <<EOF
#!/bin/bash
#SBATCH --ntasks-per-node=$sbatch_task_per_node
#SBATCH --job-name $sbatch_job_name
#SBATCH --nodes=$sbatch_node_num
#SBATCH --mail-type=ALL
#SBATCH --partition $sbatch_partition
#SBATCH --time=$sbatch_run_time
#SBATCH --chdir=$sbatch_work_dir
#SBATCH -e $sbatch_err_log
#SBATCH -o $sbatch_out_log

ulimit -s unlimited
ulimit -l unlimited

# Load the runtime environment
module purge
source /opt/ohpc/pub/apps/intel/setvars.sh
module load intel/mpi-2021.1.1
module load ior/3.3

# Site-specific fabric settings: OFI verbs provider on interface team1.282
export I_MPI_OFI_PROVIDER=Verbs
export FI_VERBS_IFACE=team1.282

echo -e "The start time is: \`date +"%Y-%m-%d %H:%M:%S"\`"
echo -e "My job ID is: \$SLURM_JOB_ID"
echo -e "The total cores is: \$SLURM_NPROCS"
echo -e "The \$SLURM_JOB_ID Job info:"
scontrol show job \$SLURM_JOB_ID

# 4 iterations (-i) of write+read, 1 MiB transfers, ${BSZ} block per process;
# \$ior_mutifile expanded to -F above when mutifile=True in the input file
mpirun -genv I_MPI_FABRICS ofi ior -v -w -r -i 4 $ior_mutifile -o $sbatch_work_dir/ior-test.file -t 1m -b ${BSZ}

echo -e "The end time is: \`date +"%Y-%m-%d %H:%M:%S"\` \n"
EOF

/usr/bin/sbatch $sbatch_work_dir/ior.slurm
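Putting it together: edit the input file, run the wrapper script, and watch the queue. The wrapper file name below is illustrative; the input path is the one hard-coded in the script above:

# Hypothetical file name for the execution script; adjust to taste.
sh /home/wushiming/ior/ior_run.sh   # generates and submits ior.slurm
squeue -u $USER                     # watch the job
# Results land in <work_dir>/<jobid>.out once the job completes.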
