/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
/* Size of reply cache. Common values are:
 * 4.3BSD:	128
 * 4.4BSD:	256
 * DEC Unix:	512-4096
 */
#define CACHESIZE		1024
#define HASHSIZE		64
#define REQHASH(xid)		((((xid) >> 24) ^ (xid)) & (HASHSIZE-1))
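/*
 * Note: REQHASH folds the top byte of the XID into the low bits before
 * masking, so clients that allocate XIDs sequentially still spread
 * across buckets; e.g. ((0x01000002 >> 24) ^ 0x01000002) & 63 == 3.
 */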
static struct hlist_head *	hash_list;
static struct list_head	lru_head;
static int			cache_disabled = 1;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
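/*
 * Rough life cycle of an entry, as implemented below:
 *
 *	RC_UNUSED --(claimed off the LRU in nfsd_cache_lookup)--> RC_INPROG
 *	RC_INPROG --(reply stored by nfsd_cache_update)---------> RC_DONE
 *	RC_DONE   --(entry recycled from the LRU)---------------> RC_INPROG
 *
 * cache_lock protects lru_head, the hash chains and c_state
 * transitions; an RC_INPROG entry is owned by a single request, so
 * its reply buffer may be filled in without holding the lock.
 */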
void
nfsd_cache_init(void)
{
	struct svc_cacherep	*rp;
	int			i = CACHESIZE;

	INIT_LIST_HEAD(&lru_head);
	while (i) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			break;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
		i--;
	}

	if (i)
		printk(KERN_ERR "nfsd: cannot allocate all %d cache entries, only got %d\n",
				CACHESIZE, CACHESIZE - i);

	hash_list = kmalloc(HASHSIZE * sizeof(struct hlist_head), GFP_KERNEL);
	if (!hash_list) {
		nfsd_cache_shutdown();
		printk(KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n",
				HASHSIZE * sizeof(struct hlist_head));
		return;
	}
	memset(hash_list, 0, HASHSIZE * sizeof(struct hlist_head));

	cache_disabled = 0;
}
void
nfsd_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree(hash_list);
	hash_list = NULL;
}
/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_del(&rp->c_lru);
	list_add_tail(&rp->c_lru, &lru_head);
}
/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
}
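/*
 * For orientation: the caller (nfsd_dispatch) drives the two entry
 * points below roughly as in this sketch. It is not the exact
 * dispatch code; the real caller lives in nfssvc.c and takes the
 * cache type from the procedure table (pc_cachetype):
 *
 *	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
 *	case RC_DOIT:	execute the procedure, then call
 *			nfsd_cache_update(rqstp, proc->pc_cachetype, statp);
 *	case RC_REPLY:	the cached reply has been composed; send it
 *	case RC_DROPIT:	drop the request without replying
 *	}
 */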
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
	struct hlist_node	*hn;
	struct hlist_head	*rh;
	struct svc_cacherep	*rp;
	u32			xid = rqstp->rq_xid,
				proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int			rtn;
	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;
	rh = &hash_list[REQHASH(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr)) == 0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;
	/* This loop shouldn't take more than a few iterations normally */
	{
	int	safe = 0;
	list_for_each_entry(rp, &lru_head, c_lru) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	}
	/* This should not happen */
	if (rp->c_state == RC_INPROG) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rp->c_addr = rqstp->rq_addr;
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;
found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, u32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;	/* len is now in 32-bit XDR words */

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}
	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];
	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}