 		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
 			goto skip_listen_ht;

-		inet_listen_lock(hashinfo);
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
 			struct sock *sk;
-			struct hlist_node *node;
+			struct hlist_nulls_node *node;
+			struct inet_listen_hashbucket *ilb;

 			num = 0;
-			sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
+			ilb = &hashinfo->listening_hash[i];
+			spin_lock_bh(&ilb->lock);
+			sk_nulls_for_each(sk, node, &ilb->head) {
 				struct inet_sock *inet = inet_sk(sk);

 				if (num < s_num) {
 					num++;
 					continue;
 				}

 				if (!(r->idiag_states & TCPF_LISTEN) ||
 				    r->id.idiag_dport ||
 				    cb->args[3] > 0)
 					goto syn_recv;

 				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}

syn_recv:
 				if (!(r->idiag_states & TCPF_SYN_RECV))
 					goto next_listen;

 				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}

next_listen:
 				cb->args[3] = 0;
 				cb->args[4] = 0;
 				++num;
 			}
+			spin_unlock_bh(&ilb->lock);

 			s_num = 0;
 			cb->args[3] = 0;
 			cb->args[4] = 0;
 		}
-		inet_listen_unlock(hashinfo);
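The global inet_listen_lock()/inet_listen_unlock() pair is gone: each listening-hash bucket now carries its own spinlock (the new struct inet_listen_hashbucket), so the dump only serializes against writers touching the same bucket, and the lock is dropped and retaken as the walk moves from bucket to bucket. A minimal userspace sketch of the same per-bucket locking pattern, using pthreads; all names here are illustrative stand-ins, not kernel API:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NBUCKETS 32

	struct node {
		int key;
		struct node *next;
	};

	/* One lock per bucket, the same shape as the patch's
	 * struct inet_listen_hashbucket { lock; head; }. */
	struct bucket {
		pthread_spinlock_t lock;
		struct node *head;
	};

	static struct bucket table[NBUCKETS];

	static void table_init(void)
	{
		for (int i = 0; i < NBUCKETS; i++) {
			pthread_spin_init(&table[i].lock, PTHREAD_PROCESS_PRIVATE);
			table[i].head = NULL;
		}
	}

	static void table_insert(int key)
	{
		struct bucket *b = &table[key % NBUCKETS];
		struct node *n = malloc(sizeof(*n));

		n->key = key;
		pthread_spin_lock(&b->lock);	/* contends only on this bucket */
		n->next = b->head;
		b->head = n;
		pthread_spin_unlock(&b->lock);
	}

	/* The dump walks bucket by bucket, like the loop above:
	 * lock bucket i, walk its chain, unlock, move to bucket i + 1. */
	static void table_dump(void)
	{
		for (int i = 0; i < NBUCKETS; i++) {
			pthread_spin_lock(&table[i].lock);
			for (struct node *n = table[i].head; n; n = n->next)
				printf("bucket %d: key %d\n", i, n->key);
			pthread_spin_unlock(&table[i].lock);
		}
	}

	int main(void)
	{
		table_init();
		for (int k = 0; k < 100; k++)
			table_insert(k);
		table_dump();
		return 0;
	}

Two inserters hashing to different buckets no longer contend at all, which is the point of the conversion; the cost is that a full walk sees each bucket at a (slightly) different instant rather than one frozen table.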
skip_listen_ht:
 		cb->args[0] = 1;
 		s_i = num = s_num = 0;

 	for (i = s_i; i < hashinfo->ehash_size; i++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
-		rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
 		struct sock *sk;
-		struct hlist_node *node;
+		struct hlist_nulls_node *node;

 		num = 0;

-		if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
+		if (hlist_nulls_empty(&head->chain) &&
+			hlist_nulls_empty(&head->twchain))
 			continue;

 		if (i > s_i)
 			s_num = 0;

-		read_lock_bh(lock);
-		sk_for_each(sk, node, &head->chain) {
+		spin_lock_bh(lock);
+		sk_nulls_for_each(sk, node, &head->chain) {
 			struct inet_sock *inet = inet_sk(sk);

 			if (num < s_num)
 				goto next_normal;
 			if (r->id.idiag_dport != inet->dport &&
 			    r->id.idiag_dport)
 				goto next_normal;
 			if (inet_csk_diag_dump(sk, skb, cb) < 0) {
-				read_unlock_bh(lock);
+				spin_unlock_bh(lock);
 				goto done;
 			}
next_normal:
 			++num;
 		}

 		if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
 			struct inet_timewait_sock *tw;

 			inet_twsk_for_each(tw, node, &head->twchain) {
 				if (r->id.idiag_dport != tw->tw_dport &&
 				    r->id.idiag_dport)
 					goto next_dying;
 				if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
-					read_unlock_bh(lock);
+					spin_unlock_bh(lock);
 					goto done;
 				}
next_dying:
 				++num;
 			}
 		}
-		read_unlock_bh(lock);
+		spin_unlock_bh(lock);
 	}

done:
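On the established side, read_lock_bh()/read_unlock_bh() becomes spin_lock_bh()/spin_unlock_bh() because the rwlock no longer pays for itself: regular lookups now run locklessly under RCU, and only writers and full walks like this dumper take the per-bucket lock. The hlist_nulls variants exist because a lockless reader can be migrated onto another chain mid-traversal when a socket it is examining is freed and reused (SLAB_DESTROY_BY_RCU): the list terminator is therefore not plain NULL but a "nulls" value that encodes which bucket's chain it ends, and a reader that finishes on the wrong terminator restarts the lookup. A self-contained sketch of that encoding; this is illustrative only, not the kernel's list_nulls implementation:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* A "nulls" end-of-chain marker: low bit set, so it can never be
	 * mistaken for a real (aligned) pointer; the remaining bits carry
	 * the bucket number. Hypothetical helpers mirroring the idea
	 * behind the kernel's hlist_nulls. */
	static inline void *make_nulls(unsigned long bucket)
	{
		return (void *)((bucket << 1) | 1UL);
	}

	static inline int is_nulls(const void *p)
	{
		return ((uintptr_t)p & 1UL) != 0;
	}

	static inline unsigned long nulls_value(const void *p)
	{
		return (uintptr_t)p >> 1;
	}

	struct item {
		int key;
		struct item *next;	/* real item, or a nulls marker */
	};

	/* Lockless-style lookup: walk the chain; if we fall off the end
	 * onto a nulls marker for a *different* bucket, a concurrent
	 * rehash moved us, so the caller must restart the search. */
	static struct item *lookup(struct item *head, int key,
				   unsigned long bucket, int *restart)
	{
		struct item *p = head;

		while (!is_nulls(p)) {
			if (p->key == key)
				return p;
			p = p->next;
		}
		*restart = (nulls_value(p) != bucket);
		return NULL;
	}

	int main(void)
	{
		struct item b = { .key = 2, .next = make_nulls(7) };
		struct item a = { .key = 1, .next = &b };
		int restart = 0;

		assert(lookup(&a, 2, 7, &restart) == &b);
		assert(lookup(&a, 3, 7, &restart) == NULL && !restart);
		/* Searching as if we started on bucket 4: the terminator
		 * says we ended on bucket 7's chain, so restart. */
		assert(lookup(&a, 3, 4, &restart) == NULL && restart);
		printf("nulls demo ok\n");
		return 0;
	}

Stashing the bucket number in the otherwise wasted terminator is what lets a reader detect the migration without taking any lock: restarts are rare, so the common-case lookup touches nothing but the chain itself.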