/*
 * net/tipc/port.c: TIPC port code
 *
 * Copyright (c) 1992-2006, Ericsson AB
 * Copyright (c) 2004-2005, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "config.h"
#include "dbg.h"
#include "port.h"
#include "addr.h"
#include "link.h"
#include "node.h"
#include "name_table.h"
#include "user_reg.h"
#include "msg.h"
#include "bcast.h"

/* Connection management: */
#define PROBING_INTERVAL 3600000	/* [ms] => 1 h */
#define CONFIRMED 0
#define PROBING 1

#define MAX_REJECT_SIZE 1024

static struct sk_buff *msg_queue_head = NULL;
static struct sk_buff *msg_queue_tail = NULL;

DEFINE_SPINLOCK(tipc_port_list_lock);
static DEFINE_SPINLOCK(queue_lock);

static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
static void port_timeout(unsigned long ref);
static u32 port_peernode(struct port *p_ptr)
{
	return msg_destnode(&p_ptr->publ.phdr);
}

static u32 port_peerport(struct port *p_ptr)
{
	return msg_destport(&p_ptr->publ.phdr);
}

static u32 port_out_seqno(struct port *p_ptr)
{
	return msg_transp_seqno(&p_ptr->publ.phdr);
}

static void port_incr_out_seqno(struct port *p_ptr)
{
	struct tipc_msg *m = &p_ptr->publ.phdr;

	if (likely(!msg_routed(m)))
		return;
	msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
}
/**
 * tipc_multicast - send a multicast message to local and remote destinations
 */

int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
		   u32 num_sect, struct iovec const *msg_sect)
{
	struct tipc_msg *hdr;
	struct sk_buff *buf;
	struct sk_buff *ibuf = NULL;
	struct port_list dports = {0, NULL, };
	struct port *oport = tipc_port_deref(ref);
	int ext_targets;
	int res;

	if (unlikely(!oport))
		return -EINVAL;

	/* Create multicast message */

	hdr = &oport->publ.phdr;
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
			!oport->user_port, &buf);
	if (unlikely(!buf))
		return res;

	/* Figure out where to send multicast message */

	ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
						TIPC_NODE_SCOPE, &dports);

	/* Send message to destinations (duplicate it only if necessary) */

	if (ext_targets) {
		if (dports.count != 0) {
			ibuf = skb_copy(buf, GFP_ATOMIC);
			if (ibuf == NULL) {
				tipc_port_list_free(&dports);
				buf_discard(buf);
				return -ENOMEM;
			}
		}
		res = tipc_bclink_send_msg(buf);
		if ((res < 0) && (dports.count != 0)) {
			buf_discard(ibuf);
			ibuf = NULL;
		}
	} else {
		ibuf = buf;
	}

	if (ibuf != NULL) {
		tipc_port_recv_mcast(ibuf, &dports);
	} else {
		tipc_port_list_free(&dports);
	}
	return res;
}
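/*
 * Usage sketch (not part of the original file): a native-API caller that
 * already holds a port reference could multicast a small payload to every
 * port bound to name type 1000, instances 0-99.  All caller-side names
 * below (my_ref, mcast_greeting, payload) are hypothetical; only the
 * tipc_multicast() signature above is assumed.
 *
 *	static int mcast_greeting(u32 my_ref)
 *	{
 *		static const char payload[] = "hello";
 *		struct iovec iov = {
 *			.iov_base = (void *)payload,
 *			.iov_len  = sizeof(payload),
 *		};
 *		struct tipc_name_seq seq = {
 *			.type  = 1000,
 *			.lower = 0,
 *			.upper = 99,
 *		};
 *
 *		return tipc_multicast(my_ref, &seq, 0, 1, &iov);
 *	}
 */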
/**
 * tipc_port_recv_mcast - deliver multicast message to all destination ports
 *
 * If there is no port list, perform a lookup to create one
 */

void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
{
	struct tipc_msg *msg;
	struct port_list dports = {0, NULL, };
	struct port_list *item = dp;
	u32 cnt = 0;

	msg = buf_msg(buf);

	/* Create destination port list, if one wasn't supplied */

	if (dp == NULL) {
		tipc_nametbl_mc_translate(msg_nametype(msg),
					  msg_namelower(msg),
					  msg_nameupper(msg),
					  TIPC_CLUSTER_SCOPE,
					  &dports);
		item = dp = &dports;
	}

	/* Deliver a copy of message to each destination port */

	if (dp->count != 0) {
		if (dp->count == 1) {
			msg_set_destport(msg, dp->ports[0]);
			tipc_port_recv_msg(buf);
			tipc_port_list_free(dp);
			return;
		}
		for (; cnt < dp->count; cnt++) {
			int index = cnt % PLSIZE;
			struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);

			if (b == NULL) {
				warn("Unable to deliver multicast message(s)\n");
				msg_dbg(msg, "LOST:");
				goto exit;
			}
			if ((index == 0) && (cnt != 0)) {
				item = item->next;
			}
			msg_set_destport(buf_msg(b), item->ports[index]);
			tipc_port_recv_msg(b);
		}
	}
exit:
	buf_discard(buf);
	tipc_port_list_free(dp);
}
/**
 * tipc_createport_raw - create a native TIPC port
 *
 * Returns local port reference
 */

u32 tipc_createport_raw(void *usr_handle,
			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
			void (*wakeup)(struct tipc_port *),
			const u32 importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 ref;

	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
	if (!p_ptr) {
		warn("Port creation failed, no memory\n");
		return 0;
	}
	ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
	if (!ref) {
		warn("Port creation failed, reference table exhausted\n");
		kfree(p_ptr);
		return 0;
	}

	p_ptr->publ.ref = ref;
	msg = &p_ptr->publ.phdr;
	msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
	msg_set_orignode(msg, tipc_own_addr);
	msg_set_prevnode(msg, tipc_own_addr);
	msg_set_origport(msg, ref);
	msg_set_importance(msg, importance);
	p_ptr->last_in_seqno = 41;
	p_ptr->publ.usr_handle = usr_handle;
	INIT_LIST_HEAD(&p_ptr->wait_list);
	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
	p_ptr->congested_link = NULL;
	p_ptr->max_pkt = MAX_PKT_DEFAULT;
	p_ptr->dispatcher = dispatcher;
	p_ptr->wakeup = wakeup;
	p_ptr->user_port = NULL;
	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
	spin_lock_bh(&tipc_port_list_lock);
	INIT_LIST_HEAD(&p_ptr->publications);
	INIT_LIST_HEAD(&p_ptr->port_list);
	list_add_tail(&p_ptr->port_list, &ports);
	spin_unlock_bh(&tipc_port_list_lock);
	tipc_port_unlock(p_ptr);
	return ref;
}
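/*
 * Usage sketch (not part of the original file): a kernel-internal user of the
 * raw API supplies its own dispatcher and wakeup callbacks instead of the
 * registry-based callbacks installed by tipc_createport().  The callback
 * names and bodies below are hypothetical placeholders; only the prototypes
 * expected by tipc_createport_raw() above are assumed.
 *
 *	static u32 my_dispatcher(struct tipc_port *port, struct sk_buff *skb)
 *	{
 *		buf_discard(skb);	// consume the incoming message
 *		return TIPC_OK;
 *	}
 *
 *	static void my_wakeup(struct tipc_port *port)
 *	{
 *		// port is no longer congested; resume sending
 *	}
 *
 *	u32 ref = tipc_createport_raw(NULL, my_dispatcher, my_wakeup,
 *				      TIPC_LOW_IMPORTANCE);
 *	if (!ref)
 *		// creation failed (no memory or reference table exhausted)
 */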
int tipc_deleteport(u32 ref)
{
	struct port *p_ptr;
	struct sk_buff *buf = NULL;

	tipc_withdraw(ref, 0, NULL);
	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;

	tipc_ref_discard(ref);
	tipc_port_unlock(p_ptr);

	k_cancel_timer(&p_ptr->timer);
	if (p_ptr->publ.connected) {
		buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
		tipc_nodesub_unsubscribe(&p_ptr->subscription);
	}
	if (p_ptr->user_port) {
		tipc_reg_remove_port(p_ptr->user_port);
		kfree(p_ptr->user_port);
	}

	spin_lock_bh(&tipc_port_list_lock);
	list_del(&p_ptr->port_list);
	list_del(&p_ptr->wait_list);
	spin_unlock_bh(&tipc_port_list_lock);
	k_term_timer(&p_ptr->timer);
	kfree(p_ptr);
	dbg("Deleted port %u\n", ref);
	tipc_net_route_msg(buf);
	return TIPC_OK;
}
/**
 * tipc_get_port - return port associated with 'ref'
 *
 * Note: Port is not locked.
 */

struct tipc_port *tipc_get_port(const u32 ref)
{
	return (struct tipc_port *)tipc_ref_deref(ref);
}

/**
 * tipc_get_handle - return user handle associated with port 'ref'
 */

void *tipc_get_handle(const u32 ref)
{
	struct port *p_ptr;
	void *handle;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return NULL;
	handle = p_ptr->publ.usr_handle;
	tipc_port_unlock(p_ptr);
	return handle;
}

static int port_unreliable(struct port *p_ptr)
{
	return msg_src_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*isunreliable = port_unreliable(p_ptr);
	spin_unlock_bh(p_ptr->publ.lock);
	return TIPC_OK;
}

int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

static int port_unreturnable(struct port *p_ptr)
{
	return msg_dest_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*isunrejectable = port_unreturnable(p_ptr);
	spin_unlock_bh(p_ptr->publ.lock);
	return TIPC_OK;
}

int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}
/*
 * port_build_proto_msg(): build a port-level protocol message
 * or a connection abort message. Called with the port lock held.
 */

static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
					    u32 origport, u32 orignode,
					    u32 usr, u32 type, u32 err,
					    u32 seqno, u32 ack)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = buf_acquire(LONG_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
		msg_set_destport(msg, destport);
		msg_set_origport(msg, origport);
		msg_set_destnode(msg, destnode);
		msg_set_orignode(msg, orignode);
		msg_set_transp_seqno(msg, seqno);
		msg_set_msgcnt(msg, ack);
		msg_dbg(msg, "PORT>SEND>:");
	}
	return buf;
}
int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
{
	msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
	msg_set_options(&tp_ptr->phdr, opt, sz);
	return TIPC_OK;
}
int tipc_reject_msg(struct sk_buff *buf, u32 err)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct sk_buff *rbuf;
	struct tipc_msg *rmsg;
	int hdr_sz;
	u32 imp = msg_importance(msg);
	u32 data_sz = msg_data_sz(msg);

	if (data_sz > MAX_REJECT_SIZE)
		data_sz = MAX_REJECT_SIZE;
	if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
		imp++;
	msg_dbg(msg, "port->rej: ");

	/* discard rejected message if it shouldn't be returned to sender */
	if (msg_errcode(msg) || msg_dest_droppable(msg)) {
		buf_discard(buf);
		return data_sz;
	}

	/* construct rejected message */
	if (msg_mcast(msg))
		hdr_sz = MCAST_H_SIZE;
	else
		hdr_sz = LONG_H_SIZE;
	rbuf = buf_acquire(data_sz + hdr_sz);
	if (rbuf == NULL) {
		buf_discard(buf);
		return data_sz;
	}
	rmsg = buf_msg(rbuf);
	msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
	msg_set_destport(rmsg, msg_origport(msg));
	msg_set_prevnode(rmsg, tipc_own_addr);
	msg_set_origport(rmsg, msg_destport(msg));
	if (msg_short(msg))
		msg_set_orignode(rmsg, tipc_own_addr);
	else
		msg_set_orignode(rmsg, msg_destnode(msg));
	msg_set_size(rmsg, data_sz + hdr_sz);
	msg_set_nametype(rmsg, msg_nametype(msg));
	msg_set_nameinst(rmsg, msg_nameinst(msg));
	memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);

	/* send self-abort message when rejecting on a connected port */
	if (msg_connected(msg)) {
		struct sk_buff *abuf = NULL;
		struct port *p_ptr = tipc_port_lock(msg_destport(msg));

		if (p_ptr) {
			if (p_ptr->publ.connected)
				abuf = port_build_self_abort_msg(p_ptr, err);
			tipc_port_unlock(p_ptr);
		}
		tipc_net_route_msg(abuf);
	}

	/* send rejected message */
	buf_discard(buf);
	tipc_net_route_msg(rbuf);
	return data_sz;
}
int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
			      struct iovec const *msg_sect, u32 num_sect,
			      int err)
{
	struct sk_buff *buf;
	int res;

	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
			!p_ptr->user_port, &buf);
	if (!buf)
		return res;

	return tipc_reject_msg(buf, err);
}
static void port_timeout(unsigned long ref)
{
	struct port *p_ptr = tipc_port_lock(ref);
	struct sk_buff *buf = NULL;

	if (!p_ptr || !p_ptr->publ.connected)
		return;

	/* Last probe answered ? */
	if (p_ptr->probing_state == PROBING) {
		buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
	} else {
		buf = port_build_proto_msg(port_peerport(p_ptr),
					   port_peernode(p_ptr),
					   p_ptr->publ.ref,
					   tipc_own_addr,
					   CONN_MANAGER,
					   CONN_PROBE,
					   TIPC_OK,
					   port_out_seqno(p_ptr),
					   0);
		port_incr_out_seqno(p_ptr);
		p_ptr->probing_state = PROBING;
		k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
	}
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
}
static void port_handle_node_down(unsigned long ref)
{
	struct port *p_ptr = tipc_port_lock(ref);
	struct sk_buff *buf = NULL;

	if (!p_ptr)
		return;
	buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
}
static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
{
	u32 imp = msg_importance(&p_ptr->publ.phdr);

	if (!p_ptr->publ.connected)
		return NULL;
	if (imp < TIPC_CRITICAL_IMPORTANCE)
		imp++;
	return port_build_proto_msg(p_ptr->publ.ref,
				    tipc_own_addr,
				    port_peerport(p_ptr),
				    port_peernode(p_ptr),
				    imp,
				    TIPC_CONN_MSG,
				    err,
				    p_ptr->last_in_seqno + 1,
				    0);
}

static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
{
	u32 imp = msg_importance(&p_ptr->publ.phdr);

	if (!p_ptr->publ.connected)
		return NULL;
	if (imp < TIPC_CRITICAL_IMPORTANCE)
		imp++;
	return port_build_proto_msg(port_peerport(p_ptr),
				    port_peernode(p_ptr),
				    p_ptr->publ.ref,
				    tipc_own_addr,
				    imp,
				    TIPC_CONN_MSG,
				    err,
				    port_out_seqno(p_ptr),
				    0);
}
void tipc_port_recv_proto_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct port *p_ptr = tipc_port_lock(msg_destport(msg));
	u32 err = TIPC_OK;
	struct sk_buff *r_buf = NULL;
	struct sk_buff *abort_buf = NULL;

	msg_dbg(msg, "PORT<RECV<:");

	if (!p_ptr) {
		err = TIPC_ERR_NO_PORT;
	} else if (p_ptr->publ.connected) {
		if (port_peernode(p_ptr) != msg_orignode(msg))
			err = TIPC_ERR_NO_PORT;
		if (port_peerport(p_ptr) != msg_origport(msg))
			err = TIPC_ERR_NO_PORT;
		if (!err && msg_routed(msg)) {
			u32 seqno = msg_transp_seqno(msg);
			u32 myno = ++p_ptr->last_in_seqno;

			if (seqno != myno) {
				err = TIPC_ERR_NO_PORT;
				abort_buf = port_build_self_abort_msg(p_ptr, err);
			}
		}
		if (msg_type(msg) == CONN_ACK) {
			int wakeup = tipc_port_congested(p_ptr) &&
				     p_ptr->publ.congested &&
				     p_ptr->wakeup;
			p_ptr->acked += msg_msgcnt(msg);
			if (tipc_port_congested(p_ptr))
				goto exit;
			p_ptr->publ.congested = 0;
			if (!wakeup)
				goto exit;
			p_ptr->wakeup(&p_ptr->publ);
			goto exit;
		}
	} else if (p_ptr->publ.published) {
		err = TIPC_ERR_NO_PORT;
	}
	if (err) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     DATA_HIGH,
					     TIPC_CONN_MSG,
					     err,
					     0,
					     0);
		goto exit;
	}

	/* All is fine */
	if (msg_type(msg) == CONN_PROBE) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     CONN_MANAGER,
					     CONN_PROBE_REPLY,
					     TIPC_OK,
					     port_out_seqno(p_ptr),
					     0);
	}
	p_ptr->probing_state = CONFIRMED;
	port_incr_out_seqno(p_ptr);
exit:
	if (p_ptr)
		tipc_port_unlock(p_ptr);
	tipc_net_route_msg(r_buf);
	tipc_net_route_msg(abort_buf);
	buf_discard(buf);
}
static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
{
	struct publication *publ;

	if (full_id)
		tipc_printf(buf, "<%u.%u.%u:%u>:",
			    tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
			    tipc_node(tipc_own_addr), p_ptr->publ.ref);
	else
		tipc_printf(buf, "%-10u:", p_ptr->publ.ref);

	if (p_ptr->publ.connected) {
		u32 dport = port_peerport(p_ptr);
		u32 destnode = port_peernode(p_ptr);

		tipc_printf(buf, " connected to <%u.%u.%u:%u>",
			    tipc_zone(destnode), tipc_cluster(destnode),
			    tipc_node(destnode), dport);
		if (p_ptr->publ.conn_type != 0)
			tipc_printf(buf, " via {%u,%u}",
				    p_ptr->publ.conn_type,
				    p_ptr->publ.conn_instance);
	}
	else if (p_ptr->publ.published) {
		tipc_printf(buf, " bound to");
		list_for_each_entry(publ, &p_ptr->publications, pport_list) {
			if (publ->lower == publ->upper)
				tipc_printf(buf, " {%u,%u}", publ->type,
					    publ->lower);
			else
				tipc_printf(buf, " {%u,%u,%u}", publ->type,
					    publ->lower, publ->upper);
		}
	}
	tipc_printf(buf, "\n");
}
#define MAX_PORT_QUERY 32768

struct sk_buff *tipc_port_get_ports(void)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	struct port *p_ptr;
	int str_len;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
	if (!buf)
		return NULL;
	rep_tlv = (struct tlv_desc *)buf->data;

	tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
	spin_lock_bh(&tipc_port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		spin_lock_bh(p_ptr->publ.lock);
		port_print(p_ptr, &pb, 0);
		spin_unlock_bh(p_ptr->publ.lock);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	str_len = tipc_printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
#define MAX_PORT_STATS 2000

struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	u32 ref;
	struct port *p_ptr;
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	int str_len;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	ref = *(u32 *)TLV_DATA(req_tlv_area);
	ref = ntohl(ref);

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return tipc_cfg_reply_error_string("port not found");

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
	if (!buf) {
		tipc_port_unlock(p_ptr);
		return NULL;
	}
	rep_tlv = (struct tlv_desc *)buf->data;

	tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
	port_print(p_ptr, &pb, 1);
	/* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
	tipc_port_unlock(p_ptr);
	str_len = tipc_printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
void tipc_port_reinit(void)
{
	struct port *p_ptr;
	struct tipc_msg *msg;

	spin_lock_bh(&tipc_port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		msg = &p_ptr->publ.phdr;
		if (msg_orignode(msg) == tipc_own_addr)
			break;
		msg_set_orignode(msg, tipc_own_addr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
}
/*
 *  port_dispatcher_sigh(): Signal handler for messages destined
 *                          to the tipc_port interface.
 */

static void port_dispatcher_sigh(void *dummy)
{
	struct sk_buff *buf;

	spin_lock_bh(&queue_lock);
	buf = msg_queue_head;
	msg_queue_head = NULL;
	spin_unlock_bh(&queue_lock);

	while (buf) {
		struct port *p_ptr;
		struct user_port *up_ptr;
		struct tipc_portid orig;
		struct tipc_name_seq dseq;
		void *usr_handle;
		int connected;
		int published;
		u32 message_type;

		struct sk_buff *next = buf->next;
		struct tipc_msg *msg = buf_msg(buf);
		u32 dref = msg_destport(msg);

		message_type = msg_type(msg);
		if (message_type > TIPC_DIRECT_MSG)
			goto reject;			/* Unsupported message type */

		p_ptr = tipc_port_lock(dref);
		if (!p_ptr)
			goto reject;			/* Port deleted while msg in queue */

		orig.ref = msg_origport(msg);
		orig.node = msg_orignode(msg);
		up_ptr = p_ptr->user_port;
		usr_handle = up_ptr->usr_handle;
		connected = p_ptr->publ.connected;
		published = p_ptr->publ.published;

		if (unlikely(msg_errcode(msg)))
			goto err;

		switch (message_type) {

		case TIPC_CONN_MSG:{
				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
				u32 peer_port = port_peerport(p_ptr);
				u32 peer_node = port_peernode(p_ptr);

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(!connected)) {
					if (unlikely(published))
						goto reject;
					tipc_connect2port(dref, &orig);
				}
				if (unlikely(msg_origport(msg) != peer_port))
					goto reject;
				if (unlikely(msg_orignode(msg) != peer_node))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				if (unlikely(++p_ptr->publ.conn_unacked >=
					     TIPC_FLOW_CONTROL_WIN))
					tipc_acknowledge(dref,
							 p_ptr->publ.conn_unacked);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg));
				break;
			}
		case TIPC_DIRECT_MSG:{
				tipc_msg_event cb = up_ptr->msg_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig);
				break;
			}
		case TIPC_MCAST_MSG:
		case TIPC_NAMED_MSG:{
				tipc_named_msg_event cb = up_ptr->named_msg_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				if (unlikely(!published))
					goto reject;
				dseq.type = msg_nametype(msg);
				dseq.lower = msg_nameinst(msg);
				dseq.upper = (message_type == TIPC_NAMED_MSG)
					? dseq.lower : msg_nameupper(msg);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig, &dseq);
				break;
			}
		}
		if (buf)
			buf_discard(buf);
		buf = next;
		continue;
err:
		switch (message_type) {

		case TIPC_CONN_MSG:{
				tipc_conn_shutdown_event cb =
					up_ptr->conn_err_cb;
				u32 peer_port = port_peerport(p_ptr);
				u32 peer_node = port_peernode(p_ptr);

				spin_unlock_bh(p_ptr->publ.lock);
				if (!connected || !cb)
					break;
				if (msg_origport(msg) != peer_port)
					break;
				if (msg_orignode(msg) != peer_node)
					break;
				tipc_disconnect(dref);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg));
				break;
			}
		case TIPC_DIRECT_MSG:{
				tipc_msg_err_event cb = up_ptr->err_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (connected || !cb)
					break;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg), &orig);
				break;
			}
		case TIPC_MCAST_MSG:
		case TIPC_NAMED_MSG:{
				tipc_named_msg_err_event cb =
					up_ptr->named_err_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (connected || !cb)
					break;
				dseq.type = msg_nametype(msg);
				dseq.lower = msg_nameinst(msg);
				dseq.upper = (message_type == TIPC_NAMED_MSG)
					? dseq.lower : msg_nameupper(msg);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg), &dseq);
				break;
			}
		}
		if (buf)
			buf_discard(buf);
		buf = next;
		continue;
reject:
		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		buf = next;
	}
}
/*
 *  port_dispatcher(): Dispatcher for messages destined
 *                     to the tipc_port interface. Called with port locked.
 */

static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
{
	buf->next = NULL;
	spin_lock_bh(&queue_lock);
	if (msg_queue_head) {
		msg_queue_tail->next = buf;
		msg_queue_tail = buf;
	} else {
		msg_queue_tail = msg_queue_head = buf;
		tipc_k_signal((Handler)port_dispatcher_sigh, 0);
	}
	spin_unlock_bh(&queue_lock);
	return TIPC_OK;
}
/*
 *  Wake up port after congestion: Called with port locked.
 */

static void port_wakeup_sh(unsigned long ref)
{
	struct port *p_ptr;
	struct user_port *up_ptr;
	tipc_continue_event cb = NULL;
	void *uh = NULL;

	p_ptr = tipc_port_lock(ref);
	if (p_ptr) {
		up_ptr = p_ptr->user_port;
		if (up_ptr) {
			cb = up_ptr->continue_event_cb;
			uh = up_ptr->usr_handle;
		}
		tipc_port_unlock(p_ptr);
	}
	if (cb)
		cb(uh, ref);
}


static void port_wakeup(struct tipc_port *p_ptr)
{
	tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
}
void tipc_acknowledge(u32 ref, u32 ack)
{
	struct port *p_ptr;
	struct sk_buff *buf = NULL;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return;
	if (p_ptr->publ.connected) {
		p_ptr->publ.conn_unacked -= ack;
		buf = port_build_proto_msg(port_peerport(p_ptr),
					   port_peernode(p_ptr),
					   ref,
					   tipc_own_addr,
					   CONN_MANAGER,
					   CONN_ACK,
					   TIPC_OK,
					   port_out_seqno(p_ptr),
					   ack);
	}
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
}
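/*
 * Usage sketch (not part of the original file): a connection-oriented user
 * callback can acknowledge consumed messages explicitly instead of relying on
 * the automatic acknowledgement issued by port_dispatcher_sigh() once
 * TIPC_FLOW_CONTROL_WIN messages are outstanding.  The parameter list below
 * mirrors the way conn_msg_cb is invoked in port_dispatcher_sigh(); the
 * callback name and body are hypothetical.
 *
 *	static void my_conn_msg_cb(void *usr_handle, u32 port_ref,
 *				   struct sk_buff **buf,
 *				   unsigned char const *data,
 *				   unsigned int size)
 *	{
 *		// ... consume 'data' ...
 *		tipc_acknowledge(port_ref, 1);	// credit one message back to peer
 *	}
 */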
/*
 * tipc_createport(): user-level call. Will add port to
 *                    registry if non-zero user_ref.
 */

int tipc_createport(u32 user_ref,
		    void *usr_handle,
		    unsigned int importance,
		    tipc_msg_err_event error_cb,
		    tipc_named_msg_err_event named_error_cb,
		    tipc_conn_shutdown_event conn_error_cb,
		    tipc_msg_event msg_cb,
		    tipc_named_msg_event named_msg_cb,
		    tipc_conn_msg_event conn_msg_cb,
		    tipc_continue_event continue_event_cb,	/* May be zero */
		    u32 *portref)
{
	struct user_port *up_ptr;
	struct port *p_ptr;
	u32 ref;

	up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
	if (!up_ptr) {
		warn("Port creation failed, no memory\n");
		return -ENOMEM;
	}
	ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
	p_ptr = tipc_port_lock(ref);
	if (!p_ptr) {
		kfree(up_ptr);
		return -ENOMEM;
	}

	p_ptr->user_port = up_ptr;
	up_ptr->user_ref = user_ref;
	up_ptr->usr_handle = usr_handle;
	up_ptr->ref = p_ptr->publ.ref;
	up_ptr->err_cb = error_cb;
	up_ptr->named_err_cb = named_error_cb;
	up_ptr->conn_err_cb = conn_error_cb;
	up_ptr->msg_cb = msg_cb;
	up_ptr->named_msg_cb = named_msg_cb;
	up_ptr->conn_msg_cb = conn_msg_cb;
	up_ptr->continue_event_cb = continue_event_cb;
	INIT_LIST_HEAD(&up_ptr->uport_list);
	tipc_reg_add_port(up_ptr);
	*portref = p_ptr->publ.ref;
	dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}
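/*
 * Usage sketch (not part of the original file): creating a user-level port
 * that only cares about incoming named messages.  Unused callbacks are passed
 * as NULL, since tipc_createport() stores them without inspection.  The
 * callback and variable names are hypothetical, and the named-message callback
 * parameter list mirrors the call made from port_dispatcher_sigh().
 *
 *	static void my_named_msg_cb(void *usr_handle, u32 port_ref,
 *				    struct sk_buff **buf,
 *				    unsigned char const *data,
 *				    unsigned int size, unsigned int importance,
 *				    struct tipc_portid const *origin,
 *				    struct tipc_name_seq const *dest_name)
 *	{
 *		// ... handle the message ...
 *	}
 *
 *	u32 my_ref;
 *	int err = tipc_createport(my_user_ref, NULL, TIPC_LOW_IMPORTANCE,
 *				  NULL, NULL, NULL, NULL, my_named_msg_cb,
 *				  NULL, NULL, &my_ref);
 */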
int tipc_ownidentity(u32 ref, struct tipc_portid *id)
{
	id->ref = ref;
	id->node = tipc_own_addr;
	return TIPC_OK;
}

int tipc_portimportance(u32 ref, unsigned int *importance)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
	spin_unlock_bh(p_ptr->publ.lock);
	return TIPC_OK;
}

int tipc_set_portimportance(u32 ref, unsigned int imp)
{
	struct port *p_ptr;

	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
	spin_unlock_bh(p_ptr->publ.lock);
	return TIPC_OK;
}
int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
	struct port *p_ptr;
	struct publication *publ;
	u32 key;
	int res = -EINVAL;

	p_ptr = tipc_port_lock(ref);
	dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
	    "lower = %u, upper = %u\n",
	    ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
	if (!p_ptr)
		return -EINVAL;
	if (p_ptr->publ.connected)
		goto exit;
	if (seq->lower > seq->upper)
		goto exit;
	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
		goto exit;
	key = ref + p_ptr->pub_count + 1;
	if (key == ref) {
		res = -EADDRINUSE;
		goto exit;
	}
	publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
				    scope, p_ptr->publ.ref, key);
	if (publ) {
		list_add(&publ->pport_list, &p_ptr->publications);
		p_ptr->pub_count++;
		p_ptr->publ.published = 1;
		res = TIPC_OK;
	}
exit:
	tipc_port_unlock(p_ptr);
	return res;
}
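/*
 * Usage sketch (not part of the original file): binding a port to name
 * {1000, 17} with cluster visibility, and unbinding it again.  'my_ref' is a
 * hypothetical port reference obtained from one of the create calls above.
 *
 *	struct tipc_name_seq seq = { .type = 1000, .lower = 17, .upper = 17 };
 *
 *	if (tipc_publish(my_ref, TIPC_CLUSTER_SCOPE, &seq) == TIPC_OK) {
 *		// ... port is now reachable via the name table ...
 *		tipc_withdraw(my_ref, TIPC_CLUSTER_SCOPE, &seq);
 *	}
 */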
int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
	struct port *p_ptr;
	struct publication *publ;
	struct publication *tpubl;
	int res = -EINVAL;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	if (!seq) {
		list_for_each_entry_safe(publ, tpubl,
					 &p_ptr->publications, pport_list) {
			tipc_nametbl_withdraw(publ->type, publ->lower,
					      publ->ref, publ->key);
		}
		res = TIPC_OK;
	} else {
		list_for_each_entry_safe(publ, tpubl,
					 &p_ptr->publications, pport_list) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(publ->type, publ->lower,
					      publ->ref, publ->key);
			res = TIPC_OK;
			break;
		}
	}
	if (list_empty(&p_ptr->publications))
		p_ptr->publ.published = 0;
	tipc_port_unlock(p_ptr);
	return res;
}
int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	int res = -EINVAL;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	if (p_ptr->publ.published || p_ptr->publ.connected)
		goto exit;
	if (!peer->ref)
		goto exit;

	msg = &p_ptr->publ.phdr;
	msg_set_destnode(msg, peer->node);
	msg_set_destport(msg, peer->ref);
	msg_set_orignode(msg, tipc_own_addr);
	msg_set_origport(msg, p_ptr->publ.ref);
	msg_set_transp_seqno(msg, 42);
	msg_set_type(msg, TIPC_CONN_MSG);
	if (!may_route(peer->node))
		msg_set_hdr_sz(msg, SHORT_H_SIZE);
	else
		msg_set_hdr_sz(msg, LONG_H_SIZE);

	p_ptr->probing_interval = PROBING_INTERVAL;
	p_ptr->probing_state = CONFIRMED;
	p_ptr->publ.connected = 1;
	k_start_timer(&p_ptr->timer, p_ptr->probing_interval);

	tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
			       (void *)(unsigned long)ref,
			       (net_ev_handler)port_handle_node_down);
	res = TIPC_OK;
exit:
	tipc_port_unlock(p_ptr);
	p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
	return res;
}
/*
 * tipc_disconnect(): Disconnect port from peer.
 *                    This is a node local operation.
 */

int tipc_disconnect(u32 ref)
{
	struct port *p_ptr;
	int res = -ENOTCONN;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	if (p_ptr->publ.connected) {
		p_ptr->publ.connected = 0;
		/* let timer expire on its own to avoid deadlock! */
		tipc_nodesub_unsubscribe(&p_ptr->subscription);
		res = TIPC_OK;
	}
	tipc_port_unlock(p_ptr);
	return res;
}
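/*
 * Usage sketch (not part of the original file): setting up a connection to a
 * known peer port and sending one message section over it.  'my_ref',
 * 'peer_port_ref', 'peer_node', 'request' and 'request_len' are hypothetical;
 * the calls are the ones defined in this file.
 *
 *	struct tipc_portid peer_id = { .ref = peer_port_ref, .node = peer_node };
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *
 *	if (tipc_connect2port(my_ref, &peer_id) == TIPC_OK) {
 *		tipc_send(my_ref, 1, &iov);
 *		// ... exchange more messages ...
 *		tipc_disconnect(my_ref);
 *	}
 */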
/*
 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
 */

int tipc_shutdown(u32 ref)
{
	struct port *p_ptr;
	struct sk_buff *buf = NULL;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;

	if (p_ptr->publ.connected) {
		u32 imp = msg_importance(&p_ptr->publ.phdr);
		if (imp < TIPC_CRITICAL_IMPORTANCE)
			imp++;
		buf = port_build_proto_msg(port_peerport(p_ptr),
					   port_peernode(p_ptr),
					   ref,
					   tipc_own_addr,
					   imp,
					   TIPC_CONN_MSG,
					   TIPC_CONN_SHUTDOWN,
					   port_out_seqno(p_ptr),
					   0);
	}
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
	return tipc_disconnect(ref);
}
int tipc_isconnected(u32 ref, int *isconnected)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*isconnected = p_ptr->publ.connected;
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

int tipc_peer(u32 ref, struct tipc_portid *peer)
{
	struct port *p_ptr;
	int res;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	if (p_ptr->publ.connected) {
		peer->ref = port_peerport(p_ptr);
		peer->node = port_peernode(p_ptr);
		res = TIPC_OK;
	} else
		res = -ENOTCONN;
	tipc_port_unlock(p_ptr);
	return res;
}

int tipc_ref_valid(u32 ref)
{
	/* Works irrespective of type */
	return !!tipc_ref_deref(ref);
}
/*
 *  tipc_port_recv_sections(): Concatenate and deliver sectioned
 *                             message for this node.
 */

int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
			    struct iovec const *msg_sect)
{
	struct sk_buff *buf;
	int res;

	res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
			MAX_MSG_SIZE, !sender->user_port, &buf);
	if (likely(buf))
		tipc_port_recv_msg(buf);
	return res;
}
/**
 * tipc_send - send message sections on connection
 */

int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
{
	struct port *p_ptr;
	u32 destnode;
	int res;

	p_ptr = tipc_port_deref(ref);
	if (!p_ptr || !p_ptr->publ.connected)
		return -EINVAL;

	p_ptr->publ.congested = 1;
	if (!tipc_port_congested(p_ptr)) {
		destnode = port_peernode(p_ptr);
		if (likely(destnode != tipc_own_addr))
			res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
							   destnode);
		else
			res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);

		if (likely(res != -ELINKCONG)) {
			port_incr_out_seqno(p_ptr);
			p_ptr->publ.congested = 0;
			return res;
		}
	}
	if (port_unreliable(p_ptr)) {
		p_ptr->publ.congested = 0;
		/* Just calculate msg length and return */
		return msg_calc_data_size(msg_sect, num_sect);
	}
	return -ELINKCONG;
}
/**
 * tipc_send_buf - send message buffer on connection
 */

int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 destnode;
	u32 hsz;
	u32 sz;
	int res;

	p_ptr = tipc_port_deref(ref);
	if (!p_ptr || !p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	hsz = msg_hdr_sz(msg);
	sz = hsz + dsz;
	msg_set_size(msg, sz);
	if (skb_cow(buf, hsz))
		return -ENOMEM;

	skb_push(buf, hsz);
	memcpy(buf->data, (unchar *)msg, hsz);
	destnode = msg_destnode(msg);
	p_ptr->publ.congested = 1;
	if (!tipc_port_congested(p_ptr)) {
		if (likely(destnode != tipc_own_addr))
			res = tipc_send_buf_fast(buf, destnode);
		else {
			tipc_port_recv_msg(buf);
			res = sz;
		}
		if (likely(res != -ELINKCONG)) {
			port_incr_out_seqno(p_ptr);
			p_ptr->publ.congested = 0;
			return res;
		}
	}
	if (port_unreliable(p_ptr)) {
		p_ptr->publ.congested = 0;
		return dsz;
	}
	return -ELINKCONG;
}
/**
 * tipc_forward2name - forward message sections to port name
 */

int tipc_forward2name(u32 ref,
		      struct tipc_name const *name,
		      u32 domain,
		      u32 num_sect,
		      struct iovec const *msg_sect,
		      struct tipc_portid const *orig,
		      unsigned int importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 destnode = domain;
	u32 destport = 0;
	int res;

	p_ptr = tipc_port_deref(ref);
	if (!p_ptr || p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	msg_set_type(msg, TIPC_NAMED_MSG);
	msg_set_orignode(msg, orig->node);
	msg_set_origport(msg, orig->ref);
	msg_set_hdr_sz(msg, LONG_H_SIZE);
	msg_set_nametype(msg, name->type);
	msg_set_nameinst(msg, name->instance);
	msg_set_lookup_scope(msg, addr_scope(domain));
	if (importance <= TIPC_CRITICAL_IMPORTANCE)
		msg_set_importance(msg, importance);
	destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
	msg_set_destnode(msg, destnode);
	msg_set_destport(msg, destport);

	if (likely(destport || destnode)) {
		if (likely(destnode == tipc_own_addr))
			return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
		res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
						   destnode);
		if (likely(res != -ELINKCONG))
			return res;
		if (port_unreliable(p_ptr)) {
			/* Just calculate msg length and return */
			return msg_calc_data_size(msg_sect, num_sect);
		}
		return -ELINKCONG;
	}
	return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
					 TIPC_ERR_NO_NAME);
}
/**
 * tipc_send2name - send message sections to port name
 */

int tipc_send2name(u32 ref,
		   struct tipc_name const *name,
		   unsigned int domain,
		   unsigned int num_sect,
		   struct iovec const *msg_sect)
{
	struct tipc_portid orig;

	orig.ref = ref;
	orig.node = tipc_own_addr;
	return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
				 TIPC_PORT_IMPORTANCE);
}
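/*
 * Usage sketch (not part of the original file): a connectionless send to a
 * named service, letting the name table pick the destination port.  The
 * caller-side variable names are hypothetical.  Note that an unresolvable
 * name does not make this call fail outright; as tipc_forward2name() shows,
 * the message is instead rejected back to the sending port with
 * TIPC_ERR_NO_NAME.
 *
 *	struct tipc_name service = { .type = 1000, .instance = 17 };
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *
 *	int rc = tipc_send2name(my_ref, &service, 0, 1, &iov);
 */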
/**
 * tipc_forward_buf2name - forward message buffer to port name
 */

int tipc_forward_buf2name(u32 ref,
			  struct tipc_name const *name,
			  u32 domain,
			  struct sk_buff *buf,
			  unsigned int dsz,
			  struct tipc_portid const *orig,
			  unsigned int importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 destnode = domain;
	u32 destport = 0;
	int res;

	p_ptr = (struct port *)tipc_ref_deref(ref);
	if (!p_ptr || p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	if (importance <= TIPC_CRITICAL_IMPORTANCE)
		msg_set_importance(msg, importance);
	msg_set_type(msg, TIPC_NAMED_MSG);
	msg_set_orignode(msg, orig->node);
	msg_set_origport(msg, orig->ref);
	msg_set_nametype(msg, name->type);
	msg_set_nameinst(msg, name->instance);
	msg_set_lookup_scope(msg, addr_scope(domain));
	msg_set_hdr_sz(msg, LONG_H_SIZE);
	msg_set_size(msg, LONG_H_SIZE + dsz);
	destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
	msg_set_destnode(msg, destnode);
	msg_set_destport(msg, destport);
	msg_dbg(msg, "forw2name ==> ");
	if (skb_cow(buf, LONG_H_SIZE))
		return -ENOMEM;
	skb_push(buf, LONG_H_SIZE);
	memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
	msg_dbg(buf_msg(buf), "PREP:");
	if (likely(destport || destnode)) {
		if (destnode == tipc_own_addr)
			return tipc_port_recv_msg(buf);
		res = tipc_send_buf_fast(buf, destnode);
		if (likely(res != -ELINKCONG))
			return res;
		if (port_unreliable(p_ptr))
			return dsz;
		return -ELINKCONG;
	}
	return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
/**
 * tipc_send_buf2name - send message buffer to port name
 */

int tipc_send_buf2name(u32 ref,
		       struct tipc_name const *dest,
		       u32 domain,
		       struct sk_buff *buf,
		       unsigned int dsz)
{
	struct tipc_portid orig;

	orig.ref = ref;
	orig.node = tipc_own_addr;
	return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
				     TIPC_PORT_IMPORTANCE);
}
/**
 * tipc_forward2port - forward message sections to port identity
 */

int tipc_forward2port(u32 ref,
		      struct tipc_portid const *dest,
		      unsigned int num_sect,
		      struct iovec const *msg_sect,
		      struct tipc_portid const *orig,
		      unsigned int importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	int res;

	p_ptr = tipc_port_deref(ref);
	if (!p_ptr || p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	msg_set_type(msg, TIPC_DIRECT_MSG);
	msg_set_orignode(msg, orig->node);
	msg_set_origport(msg, orig->ref);
	msg_set_destnode(msg, dest->node);
	msg_set_destport(msg, dest->ref);
	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
	if (importance <= TIPC_CRITICAL_IMPORTANCE)
		msg_set_importance(msg, importance);
	if (dest->node == tipc_own_addr)
		return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
	res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
	if (likely(res != -ELINKCONG))
		return res;
	if (port_unreliable(p_ptr)) {
		/* Just calculate msg length and return */
		return msg_calc_data_size(msg_sect, num_sect);
	}
	return -ELINKCONG;
}
/**
 * tipc_send2port - send message sections to port identity
 */

int tipc_send2port(u32 ref,
		   struct tipc_portid const *dest,
		   unsigned int num_sect,
		   struct iovec const *msg_sect)
{
	struct tipc_portid orig;

	orig.ref = ref;
	orig.node = tipc_own_addr;
	return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
				 TIPC_PORT_IMPORTANCE);
}
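/*
 * Usage sketch (not part of the original file): a connectionless send straight
 * to a known port identity, bypassing the name table.  Variable names are
 * hypothetical; the destination identity would typically come from a prior
 * tipc_peer() call or from the origin of a received message.
 *
 *	struct tipc_portid dest = { .ref = server_ref, .node = server_node };
 *	struct iovec iov = { .iov_base = reply, .iov_len = reply_len };
 *
 *	tipc_send2port(my_ref, &dest, 1, &iov);
 */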
/**
 * tipc_forward_buf2port - forward message buffer to port identity
 */

int tipc_forward_buf2port(u32 ref,
			  struct tipc_portid const *dest,
			  struct sk_buff *buf,
			  unsigned int dsz,
			  struct tipc_portid const *orig,
			  unsigned int importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	int res;

	p_ptr = (struct port *)tipc_ref_deref(ref);
	if (!p_ptr || p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	msg_set_type(msg, TIPC_DIRECT_MSG);
	msg_set_orignode(msg, orig->node);
	msg_set_origport(msg, orig->ref);
	msg_set_destnode(msg, dest->node);
	msg_set_destport(msg, dest->ref);
	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
	if (importance <= TIPC_CRITICAL_IMPORTANCE)
		msg_set_importance(msg, importance);
	msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
	if (skb_cow(buf, DIR_MSG_H_SIZE))
		return -ENOMEM;

	skb_push(buf, DIR_MSG_H_SIZE);
	memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
	msg_dbg(msg, "buf2port: ");
	if (dest->node == tipc_own_addr)
		return tipc_port_recv_msg(buf);
	res = tipc_send_buf_fast(buf, dest->node);
	if (likely(res != -ELINKCONG))
		return res;
	if (port_unreliable(p_ptr))
		return dsz;
	return -ELINKCONG;
}
/**
 * tipc_send_buf2port - send message buffer to port identity
 */

int tipc_send_buf2port(u32 ref,
		       struct tipc_portid const *dest,
		       struct sk_buff *buf,
		       unsigned int dsz)
{
	struct tipc_portid orig;

	orig.ref = ref;
	orig.node = tipc_own_addr;
	return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
				     TIPC_PORT_IMPORTANCE);
}