NAMD

NAMD用于在大规模并行计算机上快速模拟大分子体系的并行分子动力学代码。

NAMD用经验力场,如Amber,CHARMM和Dreiding,通过数值求解运动方程计算原子轨迹。用于预测生物分子的动力学行为和重要性质,如弥散因子,内聚能等。

关于NAMD的更多信息请访问NAMD官网

一、作业提交参数说明

用户可通过公共模板提交NAMD作业,与NAMD相关的作业参数如下:

参数 描述
namd conf 运行实例的配置文件
depend file 实例运行的相关依赖参数文件

二、NAMD作业运行参考

1.执行命令

charmrun --allow-run-as-root +p12 -hostfile /path/to/OPENMPI/hosts namd2 ubq_ws_eq.conf

2.运行文件

ubq_ws_eq.conf:
#############################################################
## JOB DESCRIPTION ##
#############################################################
# Minimization and Equilibration of
# Ubiquitin in a Water Sphere
#############################################################
## ADJUSTABLE PARAMETERS ##
#############################################################
structure ubq_ws.psf
coordinates ubq_ws.pdb
set temperature 310 ;# target temperature in Kelvin
set outputname ubq_ws_eq ;# basename for all output files
firsttimestep 0
#############################################################
## SIMULATION PARAMETERS ##
#############################################################
# Input
paraTypeCharmm on
parameters par_all27_prot_lipid.inp
temperature $temperature
# Force-Field Parameters
exclude scaled1-4
1-4scaling 1.0
cutoff 12.0 ;# non-bonded cutoff in Angstroms
switching on
switchdist 10.0
pairlistdist 14.0 ;# must be >= cutoff; margin for pairlist reuse
# Integrator Parameters
timestep 2.0 ;# 2fs/step
rigidBonds all ;# needed for 2fs steps
nonbondedFreq 1
fullElectFrequency 2
stepspercycle 10
# Constant Temperature Control
langevin on ;# do langevin dynamics
langevinDamping 1 ;# damping coefficient (gamma) of 1/ps
langevinTemp $temperature
langevinHydrogen off ;# don't couple langevin bath to hydrogens
# Electrostatic Force Evaluation
# (MSM = multilevel summation method; box bounds given explicitly below)
MSM on
MSMGridSpacing 2.5 ;# very sensitive to performance, use this default
MSMxmin -5.0
MSMxmax 60.0
MSMymin -5.0
MSMymax 60.0
MSMzmin -15.0
MSMzmax 46
# Output
# NOTE(review): this absolute path is rewritten by the submission script
# (sed on the ^outputName line) to point at the job's work directory.
outputName /home/yhpc/YHPC_1012_141338/$outputname
restartfreq 500 ;# 500steps = every 1ps
dcdfreq 250
outputEnergies 100
#############################################################
## EXTRA PARAMETERS ##
#############################################################
# Spherical boundary conditions
sphericalBC on
sphericalBCcenter 30.3081743413, 28.8049907121, 15.353994423
sphericalBCr1 26.0
sphericalBCk1 10
sphericalBCexp1 2
#############################################################
## EXECUTION SCRIPT ##
#############################################################
# Minimization
minimize 100
reinitvels $temperature
run 250000 ;# 250000 steps * 2 fs/step = 500 ps

3.依赖文件

ubq_ws.pdb

par_all27_prot_lipid.inp

ubq_ws.psf

4.input 文件

#job_name=namd
#run_time=24:00:00
partition=dell_intel
node_num=3
task_per_node=32
namd_conf=/home/wushiming/namd/ubq_ws_eq.conf
depend_file=(/home/wushiming/namd/ubq_ws.psf /home/wushiming/namd/ubq_ws.pdb /home/wushiming/namd/par_all27_prot_lipid.inp)
#work_dir=/home/wushiming/namd

5.执行脚本

#!/bin/bash
# Submit a NAMD simulation to Slurm.
# Reads user parameters (job_name, run_time, partition, node_num,
# task_per_node, namd_conf, depend_file, work_dir) from the namd_input file,
# stages all input files into a work directory, generates a batch script
# there, and submits it with sbatch.
source /home/wushiming/namd/namd_input

## check input vars
# Timestamp used for the default job name and default work directory.
# (Original had an unterminated backtick here; use $(...) instead.)
time=$(date +%m%d_%H%M%S)

# Job name: default to a timestamped name when not supplied.
if [ -z "$job_name" ]; then
    sbatch_job_name="YHPC_$time"
else
    sbatch_job_name=$job_name
fi

# Partition is optional; the #SBATCH directive is emitted only when set
# (an empty --partition argument would make sbatch fail).
if [ -z "$partition" ]; then
    sbatch_partition=""
else
    sbatch_partition=$partition
fi

# Work directory: create a fresh one under /home/yhpc unless supplied.
if [ -z "$work_dir" ]; then
    mkdir -p "/home/yhpc/YHPC_$time"
    sbatch_work_dir=/home/yhpc/YHPC_$time
else
    sbatch_work_dir=$work_dir
fi

# Wall-clock limit: default 3 hours.
# NOTE(review): kept for template compatibility; not currently passed to
# sbatch — confirm whether a #SBATCH --time line should be emitted.
if [ -z "$run_time" ]; then
    sbatch_run_time=03:00:00
else
    sbatch_run_time=$run_time
fi

# The NAMD configuration file is mandatory. Copy it into the work directory
# and operate on the COPY so the user's original file is never modified.
if [ -z "$namd_conf" ]; then
    echo "The namd_conf cannot be empty."
    exit 1
else
    cp "$namd_conf" "$sbatch_work_dir"
    sbatch_namd_file=$sbatch_work_dir/$(basename "$namd_conf")
fi

# Optional dependency files (psf/pdb/parameter files) are staged alongside.
# ('continue' is only valid inside a loop; a plain test is used instead.)
if [ -n "$depend_file" ]; then
    for i in "${depend_file[@]}"; do cp "$i" "$sbatch_work_dir"; done
fi

sbatch_node_num=$node_num
sbatch_task_per_node=$task_per_node
sbatch_err_log=$sbatch_work_dir/%j.err
sbatch_out_log=$sbatch_work_dir/%j.out

# Redirect outputName in the STAGED copy to the work directory; the escaped
# \$outputname stays literal so NAMD expands it at run time.
sed -i "/^outputName/c outputName $sbatch_work_dir/\$outputname" "$sbatch_namd_file"

# Emit the partition directive only when a partition was requested.
if [ -n "$sbatch_partition" ]; then
    partition_line="#SBATCH --partition $sbatch_partition"
else
    partition_line=""
fi

# Generate the batch script. Everything that must run at JOB time — Slurm
# variables and date calls — is escaped with \$ so it is not expanded while
# writing the heredoc. (This replaces the fragile post-hoc
# `sed 's/SLURM*/$SLURM/g'` patch, whose regex also matched "SLUR".)
cat > "$sbatch_work_dir/namd.slurm" <<EOF
#!/bin/bash
#SBATCH --ntasks-per-node=$sbatch_task_per_node
#SBATCH --job-name $sbatch_job_name
#SBATCH --nodes=$sbatch_node_num
#SBATCH --mail-type=ALL
$partition_line
#SBATCH --chdir=$sbatch_work_dir
#SBATCH -e $sbatch_err_log
#SBATCH -o $sbatch_out_log
# Load the runtime environment
ulimit -s unlimited
ulimit -l unlimited
module purge
source /opt/ohpc/pub/apps/intel/setvars.sh
module load intel/mpi-2021.1.1
module load gnu8/8.3.0
module load namd/2.1.4
echo -e "The start time is: \$(date +"%Y-%m-%d %H:%M:%S")"
echo -e "My job ID is: \$SLURM_JOB_ID"
echo -e "The total cores is: \$SLURM_NPROCS"
echo -e "The \$SLURM_JOB_ID Job info:"
scontrol show job \$SLURM_JOB_ID
export I_MPI_OFI_PROVIDER=Verbs
export FI_VERBS_IFACE=team1.282
mpirun -genv I_MPI_FABRICS ofi namd2 $sbatch_namd_file
echo -e "The end time is: \$(date +"%Y-%m-%d %H:%M:%S") \n"
EOF

/usr/bin/sbatch "$sbatch_work_dir/namd.slurm"

results matching ""

    No results matching ""