syntax = "proto3";

package slurm;

option go_package = "/slurmpb";

import "idl/static.proto";
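
// Request to modify an existing job: JobId identifies the job and data carries
// the attributes to change (see Update_job_options below). slurmVersion is
// imported from idl/static.proto and presumably selects the target Slurm version.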
message UpdateJobReq{
    uint32 JobId = 1;
    Update_job_options data = 2;
    SlurmVersion slurmVersion = 3;
}
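
// Result of an update request; Error_code is expected to be 0 (SLURM_SUCCESS)
// on success.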
message UpdateJobResp{
    int32 Error_code = 1;
}
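
// Request to submit a new batch job; data is the full job description.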
message SubmitJobReq{
    JobDescriptor data = 1;
    SlurmVersion slurmVersion = 2;
}
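
// Result of a job submission. The repeated field suggests a single request may
// return several records (e.g. for job arrays); this is inferred from the
// schema only.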
message SubmitJobResp{
    repeated Submit_response_msg submitResponseMsg = 1;
}
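
// Request to cancel/delete the job identified by JobId.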
message DeleteJobReq{
    uint32 JobId = 1;
    SlurmVersion slurmVersion = 2;
}
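
// Result of a delete request; a zero Error_code is expected to indicate success.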
message DeleteJobResp{
    int32 Error_code = 1;
}
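
// Wrapper for a single script argument, so JobDescriptor can carry a repeated
// list of arguments.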
message Argv{
    string argv = 1;
}
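
// Wrapper for a single environment entry ("name=value"), used as a repeated
// field in JobDescriptor.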
message Environment{
    string environment = 1;
}
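
// Batch job submission description. The field names and inline comments appear
// to be carried over from Slurm's job_desc_msg_t structure in slurm.h.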
message JobDescriptor{
    string Account = 1;            /* charge to specified account */
    string Acctg_freq = 2;         /* accounting polling intervals (seconds) */
    string Alloc_node = 3;         /* node making resource allocation request
                                    * NOTE: Normally set by slurm_submit* or
                                    * slurm_allocate* function */
    uint32 Alloc_resp_port = 4;    /* port to send allocation confirmation to */
    uint32 Alloc_sid = 5;          /* local sid making resource allocation request
                                    * NOTE: Normally set by slurm_submit* or
                                    * slurm_allocate* function
                                    * NOTE: Also used for update flags, see
                                    * ALLOC_SID_* flags */
    uint32 Argc = 6;               /* number of arguments to the script */
    repeated Argv argv = 7;        /* arguments to the script */
    string Array_inx = 8;          /* job array index values */
    //void *array_bitmap;          /* NOTE: Set by slurmctld */
    int64 Begin_time = 9;          /* delay initiation until this time */
    uint32 Ckpt_interval = 10;     /* periodically checkpoint this job */
    string Ckpt_dir = 11;          /* directory to store checkpoint images */
    string Comment = 12;           /* arbitrary comment (used by Moab scheduler) */
    uint32 Contiguous = 13;        /* 1 if job requires contiguous nodes,
                                    * 0 otherwise, default=0 */
    string Cpu_bind = 14;          /* binding map for map/mask_cpu */
    uint32 Cpu_bind_type = 15;     /* see cpu_bind_type_t */
    string Dependency = 16;        /* synchronize job execution with other jobs */
    int64 End_time = 17;           /* time by which job must complete, used for
                                    * job update only now, possible deadline
                                    * scheduling in the future */
    repeated Environment environment = 18; /* environment variables to set for job,
                                    * name=value pairs, one per line */
    uint32 Env_size = 19;          /* element count in environment */
    string Exc_nodes = 20;         /* comma separated list of nodes excluded
                                    * from job's allocation, default NONE */
    string Features = 21;          /* comma separated list of required features,
                                    * default NONE */
    string Gres = 22;              /* comma separated list of required generic
                                    * resources, default NONE */
    uint32 Group_id = 23;          /* group to assume, if run as root */
    uint32 Immediate = 24;         /* 1 if allocate to run or fail immediately,
                                    * 0 if to be queued awaiting resources */
    uint32 Job_id = 25;            /* job ID, default set by SLURM */
    uint32 Kill_on_node_fail = 26; /* 1 to kill the job on node failure,
                                    * 0 otherwise, default=1 */
    string Licenses = 27;          /* licenses required by the job */
    uint32 Mail_type = 28;         /* see MAIL_JOB_ definitions */
    string Mail_user = 29;         /* user to receive notification */
    string Mem_bind = 30;          /* binding map for map/mask_cpu */
    uint32 Mem_bind_type = 31;     /* see mem_bind_type_t */
    string Name = 32;              /* name of the job, default "" */
    string Network = 33;           /* network use spec */
    uint32 Nice = 34;              /* requested priority change,
                                    * NICE_OFFSET == no change */
    uint32 Num_tasks = 35;         /* number of tasks to be started,
                                    * for batch only */
    uint32 Open_mode = 36;         /* out/err open mode truncate or append,
                                    * see OPEN_MODE_* */
    uint32 Other_port = 37;        /* port to send various notification msg to */
    uint32 Overcommit = 38;        /* over-subscribe resources, for batch only */
    string Partition = 39;         /* name of requested partition,
                                    * default in SLURM config */
    uint32 Plane_size = 40;        /* plane size when task_dist =
                                    * SLURM_DIST_PLANE */
    uint32 Priority = 41;          /* relative priority of the job,
                                    * explicitly set only for user root,
                                    * 0 == held (don't initiate) */
    uint32 Profile = 42;           /* level of acct_gather_profile {all | none} */
    string Qos = 43;               /* Quality of Service */
    string Resp_host = 44;         /* NOTE: Set by slurmctld */
    string Req_nodes = 45;         /* comma separated list of required nodes,
                                    * default NONE */
    uint32 Requeue = 46;           /* enable or disable job requeue option */
    string Reservation = 47;       /* name of reservation to use */
    string Script = 48;            /* the actual job script, default NONE */
    uint32 Shared = 49;            /* 1 if job can share nodes with other jobs,
                                    * 0 if job needs exclusive access to the node,
                                    * or NO_VAL to accept the system default.
                                    * SHARED_FORCE to eliminate user control. */
    //char **spank_job_env;        /* environment variables for job prolog/epilog
    //                              * scripts as set by SPANK plugins */
    uint32 Spank_job_env_size = 50; /* element count in spank_env */
    uint32 Task_dist = 51;         /* see enum task_dist_state */
    uint32 Time_limit = 52;        /* maximum run time in minutes, default is
                                    * partition limit */
    uint32 Time_min = 53;          /* minimum run time in minutes, default is
                                    * time_limit */
    uint32 User_id = 54;           /* set only if different from current UID,
                                    * can only be explicitly set by user root */
    uint32 Wait_all_nodes = 55;    /* 0 to start job immediately after allocation,
                                    * 1 to start job after all nodes booted,
                                    * or NO_VAL to use system default */
    uint32 Warn_signal = 56;       /* signal to send when approaching end time */
    uint32 Warn_time = 57;         /* time before end to send signal (seconds) */
    string Work_dir = 58;          /* pathname of working directory */

    /* job constraints: */
    uint32 Cpus_per_task = 59;     /* number of processors required for
                                    * each task */
    uint32 Min_cpus = 60;          /* minimum number of processors required,
                                    * default=0 */
    uint32 Max_cpus = 61;          /* maximum number of processors required,
                                    * default=0 */
    uint32 Min_nodes = 62;         /* minimum number of nodes required by job,
                                    * default=0 */
    uint32 Max_nodes = 63;         /* maximum number of nodes usable by job,
                                    * default=0 */
    uint32 Boards_per_node = 64;   /* boards per node required by job */
    uint32 Sockets_per_board = 65; /* sockets per board required by job */
    uint32 Sockets_per_node = 66;  /* sockets per node required by job */
    uint32 Cores_per_socket = 67;  /* cores per socket required by job */
    uint32 Threads_per_core = 68;  /* threads per core required by job */
    uint32 Ntasks_per_node = 69;   /* number of tasks to invoke on each node */
    uint32 Ntasks_per_socket = 70; /* number of tasks to invoke on
                                    * each socket */
    uint32 Ntasks_per_core = 71;   /* number of tasks to invoke on each core */
    uint32 Ntasks_per_board = 72;  /* number of tasks to invoke on each board */
    uint32 Pn_min_cpus = 73;       /* minimum # CPUs per node, default=0 */
    uint32 Pn_min_memory = 74;     /* minimum real memory per node OR
                                    * real memory per CPU | MEM_PER_CPU,
                                    * default=0 (no limit) */
    uint32 Pn_min_tmp_disk = 75;   /* minimum tmp disk per node,
                                    * default=0 */

    /*
     * The following parameters are only meaningful on a Blue Gene system at
     * present. Some will be of value on other systems. Don't remove them; they
     * are needed for LCRM and others that can't talk to the opaque data type
     * select_jobinfo.
     */
    //uint16_t geometry[HIGHEST_DIMENSIONS];  /* node count in various
    //                                          * dimensions, e.g. X, Y, and Z */
    //uint16_t conn_type[HIGHEST_DIMENSIONS]; /* see enum connection_type */
    uint32 Reboot = 76;            /* force node reboot before startup */
    uint32 Rotate = 77;            /* permit geometry rotation if set */
    //char *blrtsimage;            /* BlrtsImage for block */
    //char *linuximage;            /* LinuxImage for block */
    //char *mloaderimage;          /* MloaderImage for block */
    //char *ramdiskimage;          /* RamDiskImage for block */
    /* End of Blue Gene specific values */

    uint32 Req_switch = 78;        /* minimum number of switches */
    //dynamic_plugin_data_t *select_jobinfo; /* opaque data type,
    //                                        * SLURM internal use only */
    string Std_err = 79;           /* pathname of stderr */
    string Std_in = 80;            /* pathname of stdin */
    string Std_out = 81;           /* pathname of stdout */
    uint32 Wait4switch = 82;       /* maximum time to wait for minimum switches */
    string Wckey = 83;             /* wckey for job */
}
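
// Per-job submission result; appears to mirror Slurm's submit_response_msg_t
// (Job_id/Step_id of the new job, Error_code expected to be 0 on success).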
message Submit_response_msg{
    uint32 Job_id = 1;
    uint32 Step_id = 2;
    uint32 Error_code = 3;
}
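
// Request for information about the job identified by JobId.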
message JobInfoMsgReq{
    uint32 JobId = 1;
    SlurmVersion slurmVersion = 2;
}
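
// Wraps the Job_info_msg returned for a JobInfoMsgReq.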
message JobInfoMsgResp {
    Job_info_msg jobInfoMsg = 1;
}
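
// Collection of job records; appears to mirror Slurm's job_info_msg_t
// (Last_update is the data timestamp, Record_count the number of entries
// in Job_list).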
message Job_info_msg{
    int64 Last_update = 1;
    uint32 Record_count = 2;
    repeated Job_info Job_list = 3;
}
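
// Detailed state of a single job; the fields and inline comments appear to
// mirror Slurm's job_info_t (slurm_job_info_t) structure from slurm.h.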
message Job_info{
    string account = 1;            /* charge to specified account */
    string alloc_node = 2;         /* local node making resource alloc */
    uint32 alloc_sid = 3;          /* local sid making resource alloc */
    uint32 array_job_id = 4;       /* job_id of a job array or 0 if N/A */
    uint32 array_task_id = 5;      /* task_id of a job array */
    uint32 assoc_id = 6;           /* association id for job */
    uint32 batch_flag = 7;         /* 1 if batch: queued job with script */
    string batch_host = 8;         /* name of host running batch script */
    string batch_script = 9;       /* contents of batch script */
    string command = 10;           /* command to be executed, built from submitted
                                    * job's argv and NULL for salloc command */
    string comment = 11;           /* arbitrary comment (used by Moab scheduler) */
    uint32 contiguous = 12;        /* 1 if job requires contiguous nodes */
    uint32 cpus_per_task = 13;     /* number of processors required for
                                    * each task */
    string dependency = 14;        /* synchronize job execution with other jobs */
    uint32 derived_ec = 15;        /* highest exit code of all job steps */
    int64 eligible_time = 16;      /* time job is eligible for running */
    int64 end_time = 17;           /* time of termination, actual or expected */
    string exc_nodes = 18;         /* comma separated list of excluded nodes */
    int32 exc_node_inx = 19;       /* excluded list index pairs into node_table:
                                    * start_range_1, end_range_1,
                                    * start_range_2, .., -1 */
    uint32 exit_code = 20;         /* exit code for job (status from wait call) */
    string features = 21;          /* comma separated list of required features */
    string gres = 22;              /* comma separated list of generic resources */
    uint32 group_id = 23;          /* group the job was submitted as */
    uint32 job_id = 24;            /* job ID */
    uint32 job_state = 25;         /* state of the job, see enum job_states */
    string licenses = 26;          /* licenses required by the job */
    uint32 max_cpus = 27;          /* maximum number of cpus usable by job */
    uint32 max_nodes = 28;         /* maximum number of nodes usable by job */
    uint32 boards_per_node = 29;   /* boards per node required by job */
    uint32 sockets_per_board = 30; /* sockets per board required by job */
    uint32 sockets_per_node = 31;  /* sockets per node required by job */
    uint32 cores_per_socket = 32;  /* cores per socket required by job */
    uint32 threads_per_core = 33;  /* threads per core required by job */
    string name = 34;              /* name of the job */
    string network = 35;           /* network specification */
    string nodes = 36;             /* list of nodes allocated to job */
    uint32 nice = 37;              /* requested priority change */
    int32 node_inx = 38;           /* list index pairs into node_table for *nodes:
                                    * start_range_1, end_range_1,
                                    * start_range_2, .., -1 */
    uint32 ntasks_per_core = 39;   /* number of tasks to invoke on each core */
    uint32 ntasks_per_node = 40;   /* number of tasks to invoke on each node */
    uint32 ntasks_per_socket = 41; /* number of tasks to invoke on each socket */
    uint32 ntasks_per_board = 42;  /* number of tasks to invoke on each board */
    uint32 num_nodes = 43;         /* minimum number of nodes required by job */
    uint32 num_cpus = 44;          /* minimum number of cpus required by job */
    string partition = 45;         /* name of assigned partition */
    uint32 pn_min_memory = 46;     /* minimum real memory per node, default=0 */
    uint32 pn_min_cpus = 47;       /* minimum # CPUs per node, default=0 */
    uint32 pn_min_tmp_disk = 48;   /* minimum tmp disk per node, default=0 */
    int64 pre_sus_time = 49;       /* time job ran prior to last suspend */
    uint32 priority = 50;          /* relative priority of the job,
                                    * 0=held, 1=required nodes DOWN/DRAINED */
    uint32 profile = 51;           /* level of acct_gather_profile {all | none} */
    string qos = 52;               /* Quality of Service */
    string req_nodes = 53;         /* comma separated list of required nodes */
    int32 req_node_inx = 54;       /* required list index pairs into node_table:
                                    * start_range_1, end_range_1,
                                    * start_range_2, .., -1 */
    uint32 req_switch = 55;        /* minimum number of switches */
    uint32 requeue = 56;           /* enable or disable job requeue option */
    int64 resize_time = 57;        /* time of latest size change */
    uint32 restart_cnt = 58;       /* count of job restarts */
    string resv_name = 59;         /* reservation name */
    /*dynamic_plugin_data_t *select_jobinfo;*/ /* opaque data type, process
                                    * using slurm_get_select_jobinfo() */
    /*job_resources_t *job_resrcs;*/ /* opaque data type, job resources */
    uint32 shared = 60;            /* 1 if job can share nodes with other jobs */
    uint32 show_flags = 61;        /* conveys level of details requested */
    int64 start_time = 62;         /* time execution begins, actual or expected */
    string state_desc = 63;        /* optional details for state_reason */
    uint32 state_reason = 64;      /* reason job still pending or failed, see
                                    * slurm.h: enum job_state_reason */
    int64 submit_time = 65;        /* time of job submission */
    int64 suspend_time = 66;       /* time job last suspended or resumed */
    uint32 time_limit = 67;        /* maximum run time in minutes or INFINITE */
    uint32 time_min = 68;          /* minimum run time in minutes or INFINITE */
    uint32 user_id = 69;           /* user the job runs as */
    int64 preempt_time = 70;       /* preemption signal time */
    uint32 wait4switch = 71;       /* maximum time to wait for minimum switches */
    string wckey = 72;             /* wckey for job */
    string work_dir = 73;          /* pathname of working directory */
}
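
// Job attributes that can be modified through UpdateJobReq; the field names
// correspond to their counterparts in JobDescriptor.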
message Update_job_options {
    string Partition = 1;
    string Qos = 2;
    uint32 Num_tasks = 3;
    uint32 Ntasks_per_node = 4;
    uint32 Ntasks_per_socket = 5;
    uint32 Ntasks_per_core = 6;
    uint32 Min_nodes = 7;
    uint32 Max_nodes = 8;
}