net/ipv4/inet_connection_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET connection oriented protocols.
 *
 * Authors:     See the TCP sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
        .lock = SEQLOCK_UNLOCKED,
        .range = { 32768, 61000 },
};

void inet_get_local_port_range(int *low, int *high)
{
        unsigned seq;
        do {
                seq = read_seqbegin(&sysctl_local_ports.lock);

                *low = sysctl_local_ports.range[0];
                *high = sysctl_local_ports.range[1];
        } while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
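
/*
 * The read_seqbegin()/read_seqretry() loop above pairs with the
 * ip_local_port_range sysctl handler (net/ipv4/sysctl_net_ipv4.c, not
 * shown here), which updates .range under write_seqlock(); a reader
 * racing with an update simply retries and never sees a torn pair.
 */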

int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb)
{
        const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
        struct sock *sk2;
        struct hlist_node *node;
        int reuse = sk->sk_reuse;

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
         * in tb->owners list belong to the same net - the
         * one this bucket belongs to.
         */

        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr ||
                                    sk2_rcv_saddr == sk_rcv_saddr)
                                        break;
                        }
                }
        }
        return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
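
/*
 * Callers reach this through icsk_af_ops->bind_conflict (see
 * inet_csk_get_port() below), so other address families can substitute
 * their own check; IPv6, for instance, provides inet6_csk_bind_conflict
 * to handle mapped-address comparisons.
 */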

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_bind_hashbucket *head;
        struct hlist_node *node;
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sock_net(sk);

        local_bh_disable();
        if (!snum) {
                int remaining, rover, low, high;

                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;
                rover = net_random() % remaining + low;

                do {
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (ib_net(tb) == net && tb->port == rover)
                                        goto next;
                        break;
                next:
                        spin_unlock(&head->lock);
                        if (++rover > high)
                                rover = low;
                } while (--remaining > 0);

                /* Exhausted local port range during search?  It is not
                 * possible for us to be holding one of the bind hash
                 * locks if this test triggers, because if 'remaining'
                 * drops to zero, we broke out of the do/while loop at
                 * the top level, not from the 'break;' statement.
                 */
                ret = 1;
                if (remaining <= 0)
                        goto fail;
                /* OK, here is the one we will use.  HEAD is
                 * non-NULL and we hold its lock.
                 */
                snum = rover;
        } else {
                head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (ib_net(tb) == net && tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (tb->fastreuse > 0 &&
                    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
                        goto success;
                } else {
                        ret = 1;
                        if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
                                goto fail_unlock;
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
                                        net, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
success:
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);
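
/*
 * For illustration: a protocol typically wires this up through its
 * struct proto, roughly as TCP does in net/ipv4/tcp_ipv4.c:
 *
 *      struct proto tcp_prot = {
 *              ...
 *              .get_port = inet_csk_get_port,
 *              ...
 *      };
 *
 * sk->sk_prot->get_port() is then invoked from inet_bind() and from
 * inet_csk_listen_start() below.
 */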

/*
 * Wait for an incoming connection, avoiding race conditions. This must
 * be called with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: a waiter added by add_wait_queue_exclusive()
         * goes after any current non-exclusive waiters, and we know
         * that it will always _stay_ after any new non-exclusive
         * waiters because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk->sk_sleep, &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk->sk_sleep, &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find already established connection */
        if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non-blocking socket, don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }

        newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
        WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
        release_sock(sk);
        return newsk;
out_err:
        newsk = NULL;
        *err = error;
        goto out;
}

EXPORT_SYMBOL(inet_csk_accept);
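
/*
 * This is normally reached through sk->sk_prot->accept from
 * inet_accept() in net/ipv4/af_inet.c; the returned newsk already
 * carries an established connection pulled off icsk_accept_queue.
 */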

/*
 * We use different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire
 * jiffies to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(unsigned long),
                               void (*delack_handler)(unsigned long),
                               void (*keepalive_handler)(unsigned long))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
                        (unsigned long)sk);
        setup_timer(&icsk->icsk_delack_timer, delack_handler,
                        (unsigned long)sk);
        setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);
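
/*
 * For illustration: TCP initializes its timers through a thin wrapper
 * in net/ipv4/tcp_timer.c, roughly
 *
 *      void tcp_init_xmit_timers(struct sock *sk)
 *      {
 *              inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *                                        &tcp_delack_timer,
 *                                        &tcp_keepalive_timer);
 *      }
 *
 * so the three handlers here end up being TCP's timer callbacks.
 */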

void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

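/*
 * Build and route a flow for the passive-open side of a connection:
 * the destination comes from the peer (or from the first hop of a
 * source-route option, if one was supplied), the source is the local
 * address the request arrived on, and strict source routes that do not
 * match the chosen gateway are rejected.
 */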
struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     const struct request_sock *req)
{
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct ip_options *opt = inet_rsk(req)->opt;
        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                            .nl_u = { .ip4_u =
                                      { .daddr = ((opt && opt->srr) ?
                                                  opt->faddr :
                                                  ireq->rmt_addr),
                                        .saddr = ireq->loc_addr,
                                        .tos = RT_CONN_FLAGS(sk) } },
                            .proto = sk->sk_protocol,
                            .flags = inet_sk_flowi_flags(sk),
                            .uli_u = { .ports =
                                       { .sport = inet_sk(sk)->sport,
                                         .dport = ireq->rmt_port } } };
        struct net *net = sock_net(sk);

        security_req_classify_flow(req, &fl);
        if (ip_route_output_flow(net, &rt, &fl, sk, 0))
                goto no_route;
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto route_err;
        return &rt->u.dst;

route_err:
        ip_rt_put(rt);
no_route:
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
                                 const u32 rnd, const u32 synq_hsize)
{
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
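
/*
 * The "& (synq_hsize - 1)" above assumes synq_hsize is a power of two;
 * reqsk_queue_alloc() rounds nr_table_entries up to one (via
 * roundup_pow_of_two()), so the mask is equivalent to a modulo here.
 */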

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
                                         struct request_sock ***prevp,
                                         const __be16 rport, const __be32 raddr,
                                         const __be32 laddr)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;

        for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                                    lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);

                if (ireq->rmt_port == rport &&
                    ireq->rmt_addr == raddr &&
                    ireq->loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        WARN_ON(req->sk);
                        *prevp = prev;
                        break;
                }
        }

        return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

void inet_csk_reqsk_queue_prune(struct sock *parent,
                                const unsigned long interval,
                                const unsigned long timeout,
                                const unsigned long max_rto)
{
        struct inet_connection_sock *icsk = inet_csk(parent);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
        int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
        int thresh = max_retries;
        unsigned long now = jiffies;
        struct request_sock **reqp, *req;
        int i, budget;

        if (lopt == NULL || lopt->qlen == 0)
                return;

        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established sockets) within the first
         * timeout. If the synack is not acknowledged for 3 seconds,
         * it means one of the following: the synack was lost, the ack
         * was lost, rtt is high, or nobody planned to ack at all
         * (i.e. a synflood).
         * When the server is somewhat loaded, the queue fills with old
         * open requests, reducing its effective size. When the server
         * is heavily loaded, the queue size shrinks to zero after
         * several minutes of work. That is not a synflood, it is
         * normal operation. The solution is to prune entries that are
         * too old, overriding the normal timeout, when the situation
         * becomes dangerous.
         *
         * Essentially, we reserve half of the room for young
         * embryos; and we abort old ones without pity, if old
         * ones are about to clog our table.
         */
        if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
                int young = lopt->qlen_young << 1;

                while (thresh > 2) {
                        if (lopt->qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }

        if (queue->rskq_defer_accept)
                max_retries = queue->rskq_defer_accept;

        budget = 2 * (lopt->nr_table_entries / (timeout / interval));
        i = lopt->clock_hand;
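        /*
         * Worked example, assuming TCP's values in this era
         * (interval = TCP_SYNQ_INTERVAL = HZ/5 and timeout =
         * TCP_TIMEOUT_INIT = 3*HZ, so timeout/interval = 15): with a
         * 512-bucket syn table, budget = 2*512/15 = ~68 buckets per
         * run, so the clock hand sweeps the whole table roughly every
         * timeout/2 = 1.5 seconds.
         */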

        do {
                reqp = &lopt->syn_table[i];
                while ((req = *reqp) != NULL) {
                        if (time_after_eq(now, req->expires)) {
                                if ((req->retrans < thresh ||
                                     (inet_rsk(req)->acked && req->retrans < max_retries))
                                    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
                                        unsigned long timeo;

                                        if (req->retrans++ == 0)
                                                lopt->qlen_young--;
                                        timeo = min((timeout << req->retrans), max_rto);
                                        req->expires = now + timeo;
                                        reqp = &req->dl_next;
                                        continue;
                                }

                                /* Drop this request */
                                inet_csk_reqsk_queue_unlink(parent, req, reqp);
                                reqsk_queue_removed(queue, req);
                                reqsk_free(req);
                                continue;
                        }
                        reqp = &req->dl_next;
                }

                i = (i + 1) & (lopt->nr_table_entries - 1);

        } while (--budget > 0);

        lopt->clock_hand = i;

        if (lopt->qlen)
                inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
                            const gfp_t priority)
{
        struct sock *newsk = sk_clone(sk, priority);

        if (newsk != NULL) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;

                inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
                inet_sk(newsk)->num = ntohs(inet_rsk(req)->loc_port);
                inet_sk(newsk)->sport = inet_rsk(req)->loc_port;
                newsk->sk_write_space = sk_stream_write_space;

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If it has a non-zero inet_sk(sk)->num, it must be bound */
        WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        percpu_counter_dec(sk->sk_prot->orphan_count);
        sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

        if (rc != 0)
                return rc;

        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is a race window here: we announce ourselves as
         * listening, but this transition has not yet been validated
         * by get_port(). That is OK, because the socket enters the
         * hash table only after validation completes.
         */
        sk->sk_state = TCP_LISTEN;
        if (!sk->sk_prot->get_port(sk, inet->num)) {
                inet->sport = htons(inet->num);

                sk_dst_reset(sk);
                sk->sk_prot->hash(sk);

                return 0;
        }

        sk->sk_state = TCP_CLOSE;
        __reqsk_queue_destroy(&icsk->icsk_accept_queue);
        return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);
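
/*
 * inet_listen() (and DCCP's listen path) call this when an application
 * invokes listen(2); on failure the socket is flipped back to TCP_CLOSE
 * and the freshly allocated accept queue is torn down again.
 */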

/*
 *      This routine closes sockets which have been at least partially
 *      opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *acc_req;
        struct request_sock *req;

        inet_csk_delete_keepalive_timer(sk);

        /* make all the listen_opt local to us */
        acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

        /* Following the specs, it would be better either to send a FIN
         * (and enter FIN-WAIT-1, which is a normal close)
         * or to send an active reset (abort).
         * Certainly, this is pretty dangerous during a synflood, but
         * that is a bad justification for our negligence 8)
         * To be honest, we are not able to implement either
         * of the variants now.                 --ANK
         */
        reqsk_queue_destroy(&icsk->icsk_accept_queue);

        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;

                acc_req = req->dl_next;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                sk->sk_prot->disconnect(child, O_NONBLOCK);

                sock_orphan(child);

                percpu_counter_inc(sk->sk_prot->orphan_count);

                inet_csk_destroy_sock(child);

                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                sk_acceptq_removed(sk);
                __reqsk_free(req);
        }
        WARN_ON(sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family         = AF_INET;
        sin->sin_addr.s_addr    = inet->daddr;
        sin->sin_port           = inet->dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_getsockopt != NULL)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                             optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_setsockopt != NULL)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                             optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif