/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;
static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
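/* Note (derived from the IPRM length convention in iucv_msg_length()
 * below): the trailing 0x01 decodes to a socket data "length" of
 * 0xff - 0x01 = 0xfe, which is greater than 7 and therefore can never
 * occur for regular socket data; that is what marks this message as a
 * special shutdown notification.
 */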
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
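/* Illustrative layout of the start of skb->cb as used by these macros
 * (assuming the 4-byte tag and 4-byte class fields of struct
 * iucv_message):
 *
 *	cb[0..3]	CB_TAG(skb)	iucv message tag
 *	cb[4..7]	CB_TRGCLS(skb)	iucv message target class
 */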
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
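/* Example use (see iucv_sock_connect() below):
 *
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						    IUCV_DISCONN),
 *			     sock_sndtimeo(sk, flags & O_NONBLOCK));
 */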
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev: AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev: AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};
/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is greater than 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
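/* Worked example (illustrative): a 5-byte IPRM payload occupies
 * PRMDATA[0..4] and is encoded as PRMDATA[7] = 0xff - 5 = 0xfa;
 * iucv_msg_length() then computes 0xff - 0xfa = 5. A special message such
 * as iprm_shutdown decodes to 0xfe, so the function caps the result at 8.
 */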
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
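/* Flow control thus differs per transport: the VM IUCV path bounds the
 * number of skbs queued on send_skb_q by the path's msglim, while the
 * HiperSockets transport counts confirmed sends (msg_sent) against the
 * peer's advertised window and additionally requires that no transmits
 * are pending.
 */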
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}
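/* Resulting on-wire frame layout (derived from the code above):
 *
 *	| ETH_HLEN bytes of zero padding | af_iucv_trans_hdr | payload |
 *
 * A clone of the skb stays on send_skb_q until the transport confirms
 * or fails the transmit via afiucv_hs_callback_txnotify().
 */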
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);
			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[9];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
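/* For reference, a minimal userspace sketch of AF_IUCV addressing
 * (illustrative only, not part of this module): struct sockaddr_iucv
 * carries fixed-size, blank-padded fields, so callers typically pad the
 * z/VM user ID and application name with spaces:
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "LNXGUEST", 8);	// blank padded
 *	memcpy(addr.siucv_name,    "APPSRV  ", 8);	// blank padded
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * "LNXGUEST" and "APPSRV" are made-up example names.
 */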
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}
		if (cmsg->cmsg_level != SOL_IUCV)
			continue;
		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}
			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for the iucv path have reached limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					    (void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;	/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
					    0, (void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = pr_iucv->path_quiesce(iucv->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return 0;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
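/* The callbacks below implement a minimal TCP-like handshake on top of
 * the flags field of struct af_iucv_trans_hdr (dispatched in
 * afiucv_hs_rcv()): SYN opens a connection, SYN|ACK confirms it,
 * SYN|FIN refuses it, FIN closes it, and WIN advertises receive-window
 * (msglimit) updates.
 */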
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		if (nsk)
			sk_free(nsk);
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk, 1);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (sock_queue_rcv_skb(sk, skb)) {
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
		}
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct hlist_node *node;
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	char nullstring[8];
	int err = 0;

	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock:
	 * how should we send with no sock?
	 * 1) send without sock and without send-rc checking?
	 * 2) introduce a default sock to handle these cases?
	 *
	 * SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 * data -> send FIN
	 * SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	default:
		/* plain data frame */
		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
		       CB_TRGCLS_LEN);
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;
	struct hlist_node *node;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = (struct net_device *)ptr;
	struct hlist_node *node;
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, node, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}
	register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	}
	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);