/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#define RPCDBG_FACILITY	RPCDBG_SVCDSP
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_NONE = -1,	/* uninitialised, choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.mode = SVC_POOL_NONE
};
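/*
 * Illustration (not code): on a hypothetical two-node machine running in
 * SVC_POOL_PERNODE mode, the init code below would set
 *	to_pool[0] = 0, to_pool[1] = 1		(node -> pool)
 *	pool_to[0] = 0, pool_to[1] = 1		(pool -> node)
 * so to_pool[] and pool_to[] are inverses over the ids actually in use.
 */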
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
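/*
 * In short: multiple online NUMA nodes -> PERNODE; a single node with more
 * than two cpus -> PERCPU; anything smaller (UP or dual-cpu) -> GLOBAL.
 */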
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;
	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_processor_id()+1;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_node_id()+1;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Build the global map of cpus to pools and vice versa.
 */
static unsigned int
svc_pool_map_init(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	if (m->mode != SVC_POOL_NONE)
		return m->npools;

	m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	return m->npools;
}
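/*
 * Note: the map is built at most once; a second call simply returns the
 * existing pool count, so every pooled service created afterwards shares
 * the same cpu/node-to-pool mapping.
 */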
/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node;		/* or cpu */

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized and the
	 * map mode is not NONE.
	 */
	BUG_ON(m->mode == SVC_POOL_NONE);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, cpumask_of_cpu(node));
		return 1;
	case SVC_POOL_PERNODE:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, node_to_cpumask(node));
		return 1;
	}
	return 0;
}
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * SVC_POOL_NONE happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
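/*
 * Hypothetical caller sketch (the transport enqueue path lives elsewhere):
 * an incoming request would typically be steered to the pool of the cpu
 * that took the interrupt, e.g.
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(serv, smp_processor_id());
 *
 *	spin_lock_bh(&pool->sp_lock);
 *	... queue the socket on pool->sp_sockets, wake a pool thread ...
 *	spin_unlock_bh(&pool->sp_lock);
 */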
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	   void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_init();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_kill_signal = sig;
		serv->sv_module = mod;
	}

	return serv;
}
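/*
 * Usage sketch (hypothetical caller, not part of this file): a service
 * that wants one pool per cpu/node and signal-based thread shutdown would
 * be set up roughly like
 *
 *	serv = svc_create_pooled(&my_program, my_bufsize,
 *				 my_last_thread, my_thread_fn,
 *				 SIGINT, THIS_MODULE);
 *	if (serv == NULL)
 *		return -ENOMEM;
 *
 * where my_program, my_bufsize, my_last_thread and my_thread_fn are names
 * supplied by the caller.
 */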
/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
	struct svc_sock	*svsk;

	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	while (!list_empty(&serv->sv_tempsocks)) {
		svsk = list_entry(serv->sv_tempsocks.next,
				  struct svc_sock,
				  sk_list);
		svc_close_socket(svsk);
	}
	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	while (!list_empty(&serv->sv_permsocks)) {
		svsk = list_entry(serv->sv_permsocks.next,
				  struct svc_sock,
				  sk_list);
		svc_close_socket(svsk);
	}

	cache_clean_deferred(serv);

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
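/*
 * Note on lifetime: sv_nrthreads doubles as a reference count.  Each
 * svc_destroy() call drops one reference (svc_exit_thread() calls it as a
 * thread exits); the sockets, pools and the svc_serv itself are only torn
 * down when the last reference goes away.
 */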
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	int pages;
	int arghi = 0;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume each is at most
				       * one page */
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return !pages;
}
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
		    struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;
	int		have_oldmask = 0;
	cpumask_t	oldmask;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	init_waitqueue_head(&rqstp->rq_wait);

	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	if (serv->sv_nrpools > 1)
		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

	error = kernel_thread((int (*)(void *)) func, rqstp, 0);

	if (have_oldmask)
		set_cpus_allowed(current, oldmask);

	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;

out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
/*
 * Create a thread in the default pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *victim;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		__module_get(serv->sv_module);
		error = __svc_create_thread(serv->sv_function, serv,
					    choose_pool(serv, pool, &state));
		if (error < 0) {
			module_put(serv->sv_module);
			break;
		}
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (victim = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(serv->sv_kill_signal, victim, 1);
		nrservs++;
	}

	return error;
}
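/*
 * Usage sketch (hypothetical caller, assuming the serv was created with
 * svc_create_pooled() and the caller holds the BKL):
 *
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *	svc_destroy(serv);		(drops the svc_get() reference)
 *
 * Passing a specific pool instead of NULL resizes just that pool.
 */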
/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
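/*
 * Sketch of a typical server thread body (hypothetical, not part of this
 * file), showing where svc_exit_thread() fits:
 *
 *	static void my_thread_fn(struct svc_rqst *rqstp)
 *	{
 *		int err;
 *
 *		for (;;) {
 *			err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
 *			if (err == -EINTR)
 *				break;		(e.g. sv_kill_signal received)
 *			svc_process(rqstp);
 *		}
 *		svc_exit_thread(rqstp);
 *	}
 */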
/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program	*progp;
	unsigned long		flags;
	int			i, error = 0, dummy;

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
					progp->pg_name,
					proto == IPPROTO_UDP? "udp" : "tcp",
					port,
					i,
					progp->pg_vers[i]->vs_hidden?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
			if (error < 0)
				break;
			if (port && !dummy) {
				error = -EACCES;
				break;
			}
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}
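/*
 * Usage sketch (hypothetical callers): a transport setup path would call
 * svc_register(serv, IPPROTO_UDP, port) or svc_register(serv, IPPROTO_TCP,
 * port) once a socket is bound, while the teardown paths above
 * (__svc_create() and svc_destroy()) call svc_register(serv, 0, 0) to clear
 * all portmap entries for the service's programs.
 */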
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;
	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_sendfile_ok = 1;
	/* tcp needs a space for the record length... */
	if (rqstp->rq_prot == IPPROTO_TCP)
		svc_putnl(resv, 0);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);
	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;
	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rpc_stat = rpc_garbage_args;
		goto err_bad;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;
	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve(rqstp, procp->pc_xdrressize<<2);
	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}
	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;
err_short_len:
	if (net_ratelimit())
		printk("svc: short len %Zd, dropping request\n", argv->iov_len);
	goto dropit;			/* drop request */

err_bad_dir:
	if (net_ratelimit())
		printk("svc: bad direction %d, dropping request\n", dir);
	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	if (net_ratelimit())
		printk("svc: unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	if (net_ratelimit())
		printk("svc: unknown procedure (%d)\n", proc);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	if (net_ratelimit())
		printk("svc: failed to decode args\n");
	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	int max = RPCSVC_MAXPAYLOAD_TCP;

	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
		max = RPCSVC_MAXPAYLOAD_UDP;
	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
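/*
 * Usage sketch (hypothetical caller): a read-type procedure would clamp the
 * amount of data it is willing to return to this limit, e.g.
 *
 *	count = min_t(u32, count, svc_max_payload(rqstp));
 *
 * so that a reply never exceeds what the transport and the serv's
 * sv_max_payload allow.
 */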