/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

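/* Note: PRMDATA[7] = 0x01 decodes to a socket data length of
 * 0xff - 0x01 = 0xfe (greater than 7), so receivers recognize this
 * message as a special shutdown notification rather than user data
 * (see iucv_msg_length() and iucv_process_message() below).
 */
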
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

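/* Resulting layout of skb->cb: the iucv message tag occupies the first
 * CB_TAG_LEN bytes, immediately followed by the message target class;
 * both sizes are taken from the 4-byte fields of struct iucv_message.
 */
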
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

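/* Typical use, as in iucv_sock_connect() below: sleep until the socket
 * reaches one of two states or the timeout expires:
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						    IUCV_DISCONN), timeo);
 */
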
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static void iucv_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		if (skb->dev)
			dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_SEVERED:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = pr_iucv->path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the
 * socket data len is greater than 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

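/* Worked example of the PRMDATA length encoding: a sender with 5 bytes of
 * socket data stores PRMDATA[7] = 0xff - 5 = 0xfa, and this function then
 * recovers 0xff - 0xfa = 5. The shutdown message (PRMDATA[7] = 0x01)
 * decodes to 0xfe and is therefore capped to 8 and treated as a special
 * notification by iucv_process_message().
 */
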
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is either in the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

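/* Two flow-control schemes: the VM IUCV transport bounds the number of
 * queued send skbs by the per-path message limit, while the HiperSockets
 * transport compares sent-but-unconfirmed messages (msg_sent) against the
 * peer's advertised window (msglimit_peer) and requires that no TX
 * notifications are pending.
 */
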
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct net *net = sock_net(sock);
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (err) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return err;
}

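/* Note: a clone of every transmitted frame stays on send_skb_q until the
 * device driver reports the transmission result through the txnotify
 * callback (see afiucv_hs_callback_txnotify() below), which dequeues and
 * frees it and wakes up senders blocked on the message limit.
 */
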
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err, blen;
	struct sk_buff *skb;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			/* send fin */
			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
			skb = sock_alloc_send_skb(sk, blen, 1, &err);
			if (skb) {
				skb_reserve(skb, blen);
				err = afiucv_hs_send(NULL, sk, skb,
						     AF_IUCV_FLAG_FIN);
			}
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

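/* User-space sketch: an AF_IUCV socket is created like any other, e.g.
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 * and then bound/connected with a struct sockaddr_iucv.
 */
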
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    sk->sk_state == IUCV_DISCONN ||	/* due to PM restore */
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_hs_connect(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	int err = 0;

	/* send syn */
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (!skb) {
		err = -ENOMEM;
		goto done;
	}
	skb->dev = NULL;
	skb_reserve(skb, blen);
	err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
done:
	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = afiucv_hs_connect(sock);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
		pr_iucv->path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for the iucv path have reached
	 * the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					    (void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

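/* Example (user-space sketch, iovec setup and error handling omitted) of
 * selecting the IUCV target class for a message via ancillary data, using
 * the standard cmsg(3) macros:
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr mh = { 0 };
 *	struct cmsghdr *c;
 *
 *	mh.msg_control = cbuf;
 *	mh.msg_controllen = sizeof(cbuf);
 *	c = CMSG_FIRSTHDR(&mh);
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type = SCM_IUCV_TRGCLS;
 *	c->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(c) = trgcls;	(u32 target class)
 *	sendmsg(fd, &mh, 0);
 *
 * The receive side gets the class back through the matching put_cmsg()
 * in iucv_sock_recvmsg().
 */
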
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb, *sskb;
	int blen;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		atomic_inc(&iucv->msg_recv);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				/* send WIN to peer */
				blen = sizeof(struct af_iucv_trans_hdr) +
					ETH_HLEN;
				sskb = sock_alloc_send_skb(sk, blen, 1, &err);
				if (sskb) {
					skb_reserve(sskb, blen);
					err = afiucv_hs_send(NULL, sk, sskb,
							     AF_IUCV_FLAG_WIN);
				}
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
					    0, (void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = pr_iucv->path_quiesce(iucv->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

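/* Example (user-space sketch): raise the message limit while the socket is
 * still in IUCV_OPEN or IUCV_BOUND state:
 *	int lim = 16;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &lim, sizeof(lim));
 * getsockopt(SO_MSGLIMIT) returns the negotiated per-path limit once
 * connected and the configured default otherwise.
 */
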
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
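/* Connection setup over HiperSockets mimics a TCP-like handshake using
 * the flags field of struct af_iucv_trans_hdr (see the callbacks below):
 *	SYN		connect request (carries the sender's msglimit
 *			as window)
 *	SYN | ACK	connect request accepted
 *	SYN | FIN	connect request refused
 *	FIN		close
 *	WIN		receive-window update for flow control
 */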
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		if (nsk)
			sk_free(nsk);
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk, 1);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (iucv) {
		bh_lock_sock(sk);
		if (!list_empty(&iucv->accept_q))
			sk->sk_state = IUCV_SEVERED;
		else
			sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
		bh_unlock_sock(sk);
	}
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (sock_queue_rcv_skb(sk, skb)) {
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		}
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}

/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct hlist_node *node;
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	   how should we send with no sock?
	   1) send without sock - no send rc checking?
	   2) introduce a default sock to handle these cases

	   SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	   data -> send FIN
	   SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len > sizeof(struct af_iucv_trans_hdr))
			err = afiucv_hs_callback_rx(sk, skb);
		else
			kfree_skb(skb);
		break;
	default:
		/* plain data frame */
		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
		       CB_TRGCLS_LEN);
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}

/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *this = NULL;
	unsigned long flags;
	struct hlist_node *node;

	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);

	if (!iucv)
		return;

	list = &iucv->send_skb_q;
	list_skb = list->next;
	if (skb_queue_empty(list))
		return;

	spin_lock_irqsave(&list->lock, flags);
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			this = list_skb;
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(this, list);
				iucv_sock_wake_msglim(sk);
				dev_put(this->dev);
				kfree_skb(this);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(this, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				dev_put(this->dev);
				kfree_skb(this);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(this, list);
				dev_put(this->dev);
				kfree_skb(this);
				if (!list_empty(&iucv->accept_q))
					sk->sk_state = IUCV_SEVERED;
				else
					sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
				break;
			}
			break;
		}
		list_skb = list_skb->next;
	}
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	}
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);