#!/bin/bash
#PBS -S /bin/bash
#PBS -l walltime=08:00:00
#PBS -l nodes=4
#PBS -N M5.000Z0.0100
#PBS -o mppnp.out
#PBS -e mppnp.err
#PBS -l mem=100gb
#PBS -q qwork

cd $PBS_O_WORKDIR

# the number of MPI processes per node *** there are 24 cores per node
export ppn=24
export OMP_NUM_THREADS=1 #$[24/ppn]
export MKL_NUM_THREADS=1

JOBINFO=mppnp.${PBS_JOBID}
echo "Starting run at: `date`" > $JOBINFO
echo "Current working directory is `pwd`" >> $JOBINFO
echo $PBS_NODEFILE >> $JOBINFO

tempname=${PBS_JOBID}

# Sample script for running an MPI-based parallel program, parallel_diffuse.
echo "Current working directory is `pwd`"
echo "Node file: $PBS_NODEFILE :"
echo "---------------------"
cat $PBS_NODEFILE
echo "---------------------"

#MPIEX=/global/software/openmpi-1.4.4/intel-2011/bin/mpiexec
MPIEX=mpiexec
FULL_EXE=/home/critter/critter/PPN/MPPNP_set1extension/mppnp/CODE_netwsend/mppnp.exe

# create output directories if needed
[ -d H5_surf ] || mkdir H5_surf
[ -d H5_out ] || mkdir H5_out
[ -d H5_restart ] || mkdir H5_restart

NUM_PROCS=`/bin/awk 'END {print NR}' $PBS_NODEFILE`
echo "Running on $NUM_PROCS processors."

echo "Starting run at: `date`" >> $JOBINFO

#$MPIEX -n ${NUM_PROCS} $FULL_EXE #mppnp_`echo ${PBS_JOBID} | awk -F. '{print $1}'`.out
#$MPIEX --display-map --verbose -n $[PBS_NUM_NODES*ppn] -npernode $ppn $FULL_EXE >& ${PBS_JOBID}
$MPIEX --display-map --verbose -n 96 -npernode 24 $FULL_EXE >& ${PBS_JOBID}

echo "Program mppnp finished with exit code $? at: `date`" >> $JOBINFO
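# A minimal sketch of how this script would typically be submitted on a
# PBS/Torque system. The filename mppnp.pbs is an assumption for illustration,
# not taken from the script itself:
#
#   qsub mppnp.pbs
#
# Queued and running jobs can then be inspected with `qstat -u $USER`, and the
# job's stdout/stderr appear in mppnp.out / mppnp.err as set by the #PBS -o/-e
# directives above.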