staging: unisys: Clean up kthread usage
Author:     Neil Horman <nhorman@redhat.com>
AuthorDate: Tue, 21 Jul 2015 13:55:39 +0000 (09:55 -0400)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 23 Jul 2015 04:19:17 +0000 (21:19 -0700)
Remove the has_stopped completion, as kthread_stop() already waits on a
completion of its own internally.
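
For background, kthread_stop() blocks until the thread function has
returned (the kthread core signals its own completion when the thread
exits), so a driver-private has_stopped completion only duplicates that
wait. A minimal, hypothetical sketch of the pattern, not code from this
driver:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

/* hypothetical example: kthread_stop() blocks until example_threadfn()
 * has returned, so no extra completion is needed
 */
static struct task_struct *example_task;

static int example_threadfn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);	/* periodic work would go here */
	return 0;		/* this value is returned by kthread_stop() */
}

static int example_start(void)
{
	example_task = kthread_run(example_threadfn, NULL, "example");
	return PTR_ERR_OR_ZERO(example_task);
}

static void example_stop(void)
{
	/* waits for the thread to exit; no driver-side completion needed */
	kthread_stop(example_task);
}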

Correct the while loops: loop on !kthread_should_stop() rather than
checking for a stop request at the bottom of a while (1) body.

Remove the while loop in drain_queue(), since the top-level loop in
process_incoming_rsps() already provides the iteration.
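
The resulting shape is sketched below with hypothetical names (the real
loop is process_incoming_rsps() and the per-wakeup work is drain_queue()):
the thread loops on !kthread_should_stop(), and the drain helper does its
work for one wakeup and returns, leaving any repetition to its caller.

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/wait.h>

/* illustrative only: the names below are stand-ins, not visornic symbols */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static atomic_t example_event = ATOMIC_INIT(0);

static void example_drain(void)
{
	/* handle the work for one wakeup, then return; any further
	 * iteration is the caller's job
	 */
}

static int example_rsp_thread(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible_timeout(example_wq,
						 atomic_read(&example_event) == 1,
						 msecs_to_jiffies(2));
		atomic_set(&example_event, 0);
		example_drain();
	}
	return 0;
}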

Signed-off-by: Neil Horman <nhorman@redhat.com>
Signed-off-by: Benjamin Romer <benjamin.romer@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index a421a5b..923c580 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -91,7 +91,6 @@ static struct visor_driver visornic_driver = {
 
 struct visor_thread_info {
        struct task_struct *task;
-       struct completion has_stopped;
        int id;
 };
 
@@ -317,7 +316,6 @@ static int visor_thread_start(struct visor_thread_info *thrinfo,
                              void *thrcontext, char *name)
 {
        /* used to stop the thread */
-       init_completion(&thrinfo->has_stopped);
        thrinfo->task = kthread_run(threadfn, thrcontext, name);
        if (IS_ERR(thrinfo->task)) {
                pr_debug("%s failed (%ld)\n",
@@ -341,10 +339,8 @@ static void visor_thread_stop(struct visor_thread_info *thrinfo)
        if (!thrinfo->id)
                return; /* thread not running */
 
-       kthread_stop(thrinfo->task);
-       /* give up if the thread has NOT died in 1 minute */
-       if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
-               thrinfo->id = 0;
+       BUG_ON(kthread_stop(thrinfo->task));
+       thrinfo->id = 0;
 }
 
 static ssize_t enable_ints_write(struct file *file,
@@ -1691,99 +1687,95 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
        unsigned long flags;
        struct net_device *netdev;
 
-       /* drain queue */
-       while (1) {
-               /* TODO: CLIENT ACQUIRE -- Don't really need this at the
-                * moment */
-               if (!visorchannel_signalremove(devdata->dev->visorchannel,
-                                              IOCHAN_FROM_IOPART,
-                                              cmdrsp))
-                       break; /* queue empty */
-
-               switch (cmdrsp->net.type) {
-               case NET_RCV:
-                       devdata->chstat.got_rcv++;
-                       /* process incoming packet */
-                       visornic_rx(cmdrsp);
-                       break;
-               case NET_XMIT_DONE:
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       devdata->chstat.got_xmit_done++;
-                       if (cmdrsp->net.xmtdone.xmt_done_result)
-                               devdata->chstat.xmit_fail++;
-                       /* only call queue wake if we stopped it */
-                       netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
-                       /* ASSERT netdev == vnicinfo->netdev; */
-                       if ((netdev == devdata->netdev) &&
-                           netif_queue_stopped(netdev)) {
-                               /* check to see if we have crossed
-                                * the lower watermark for
-                                * netif_wake_queue()
+       /* TODO: CLIENT ACQUIRE -- Don't really need this at the
+        * moment */
+       if (!visorchannel_signalremove(devdata->dev->visorchannel,
+                                      IOCHAN_FROM_IOPART,
+                                      cmdrsp))
+               return; /* queue empty */
+
+       switch (cmdrsp->net.type) {
+       case NET_RCV:
+               devdata->chstat.got_rcv++;
+               /* process incoming packet */
+               visornic_rx(cmdrsp);
+               break;
+       case NET_XMIT_DONE:
+               spin_lock_irqsave(&devdata->priv_lock, flags);
+               devdata->chstat.got_xmit_done++;
+               if (cmdrsp->net.xmtdone.xmt_done_result)
+                       devdata->chstat.xmit_fail++;
+               /* only call queue wake if we stopped it */
+               netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
+               /* ASSERT netdev == vnicinfo->netdev; */
+               if ((netdev == devdata->netdev) &&
+                   netif_queue_stopped(netdev)) {
+                       /* check to see if we have crossed
+                        * the lower watermark for
+                        * netif_wake_queue()
+                        */
+                       if (((devdata->chstat.sent_xmit >=
+                           devdata->chstat.got_xmit_done) &&
+                           (devdata->chstat.sent_xmit -
+                           devdata->chstat.got_xmit_done <=
+                           devdata->lower_threshold_net_xmits)) ||
+                           ((devdata->chstat.sent_xmit <
+                           devdata->chstat.got_xmit_done) &&
+                           (ULONG_MAX - devdata->chstat.got_xmit_done
+                           + devdata->chstat.sent_xmit <=
+                           devdata->lower_threshold_net_xmits))) {
+                               /* enough NET_XMITs completed
+                                * so can restart netif queue
                                 */
-                               if (((devdata->chstat.sent_xmit >=
-                                   devdata->chstat.got_xmit_done) &&
-                                   (devdata->chstat.sent_xmit -
-                                   devdata->chstat.got_xmit_done <=
-                                   devdata->lower_threshold_net_xmits)) ||
-                                   ((devdata->chstat.sent_xmit <
-                                   devdata->chstat.got_xmit_done) &&
-                                   (ULONG_MAX - devdata->chstat.got_xmit_done
-                                   + devdata->chstat.sent_xmit <=
-                                   devdata->lower_threshold_net_xmits))) {
-                                       /* enough NET_XMITs completed
-                                        * so can restart netif queue
-                                        */
-                                       netif_wake_queue(netdev);
-                                       devdata->flow_control_lower_hits++;
-                               }
-                       }
-                       skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                       kfree_skb(cmdrsp->net.buf);
-                       break;
-               case NET_RCV_ENBDIS_ACK:
-                       devdata->chstat.got_enbdisack++;
-                       netdev = (struct net_device *)
-                       cmdrsp->net.enbdis.context;
-                       spin_lock_irqsave(&devdata->priv_lock, flags);
-                       devdata->enab_dis_acked = 1;
-                       spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-                       if (devdata->server_down &&
-                           devdata->server_change_state) {
-                               /* Inform Linux that the link is up */
-                               devdata->server_down = false;
-                               devdata->server_change_state = false;
                                netif_wake_queue(netdev);
-                               netif_carrier_on(netdev);
+                               devdata->flow_control_lower_hits++;
                        }
-                       break;
-               case NET_CONNECT_STATUS:
-                       netdev = devdata->netdev;
-                       if (cmdrsp->net.enbdis.enable == 1) {
-                               spin_lock_irqsave(&devdata->priv_lock, flags);
-                               devdata->enabled = cmdrsp->net.enbdis.enable;
-                               spin_unlock_irqrestore(&devdata->priv_lock,
-                                                      flags);
-                               netif_wake_queue(netdev);
-                               netif_carrier_on(netdev);
-                       } else {
-                               netif_stop_queue(netdev);
-                               netif_carrier_off(netdev);
-                               spin_lock_irqsave(&devdata->priv_lock, flags);
-                               devdata->enabled = cmdrsp->net.enbdis.enable;
-                               spin_unlock_irqrestore(&devdata->priv_lock,
-                                                      flags);
-                       }
-                       break;
-               default:
-                       break;
                }
-               /* cmdrsp is now available for reuse  */
+               skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
+               spin_unlock_irqrestore(&devdata->priv_lock, flags);
+               kfree_skb(cmdrsp->net.buf);
+               break;
+       case NET_RCV_ENBDIS_ACK:
+               devdata->chstat.got_enbdisack++;
+               netdev = (struct net_device *)
+               cmdrsp->net.enbdis.context;
+               spin_lock_irqsave(&devdata->priv_lock, flags);
+               devdata->enab_dis_acked = 1;
+               spin_unlock_irqrestore(&devdata->priv_lock, flags);
 
                if (kthread_should_stop())
                        break;
+               if (devdata->server_down &&
+                   devdata->server_change_state) {
+                       /* Inform Linux that the link is up */
+                       devdata->server_down = false;
+                       devdata->server_change_state = false;
+                       netif_wake_queue(netdev);
+                       netif_carrier_on(netdev);
+               }
+               break;
+       case NET_CONNECT_STATUS:
+               netdev = devdata->netdev;
+               if (cmdrsp->net.enbdis.enable == 1) {
+                       spin_lock_irqsave(&devdata->priv_lock, flags);
+                       devdata->enabled = cmdrsp->net.enbdis.enable;
+                       spin_unlock_irqrestore(&devdata->priv_lock,
+                                              flags);
+                       netif_wake_queue(netdev);
+                       netif_carrier_on(netdev);
+               } else {
+                       netif_stop_queue(netdev);
+                       netif_carrier_off(netdev);
+                       spin_lock_irqsave(&devdata->priv_lock, flags);
+                       devdata->enabled = cmdrsp->net.enbdis.enable;
+                       spin_unlock_irqrestore(&devdata->priv_lock,
+                                              flags);
+               }
+               break;
+       default:
+               break;
        }
+       /* cmdrsp is now available for reuse  */
 }
 
 /**
@@ -1803,9 +1795,9 @@ process_incoming_rsps(void *v)
 
        cmdrsp = kmalloc(SZ, GFP_ATOMIC);
        if (!cmdrsp)
-               complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+               return 0;
 
-       while (1) {
+       while (!kthread_should_stop()) {
                wait_event_interruptible_timeout(
                        devdata->rsp_queue, (atomic_read(
                                             &devdata->interrupt_rcvd) == 1),
@@ -1818,12 +1810,10 @@ process_incoming_rsps(void *v)
                atomic_set(&devdata->interrupt_rcvd, 0);
                send_rcv_posts_if_needed(devdata);
                drain_queue(cmdrsp, devdata);
-               if (kthread_should_stop())
-                       break;
        }
 
        kfree(cmdrsp);
-       complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+       return 0;
 }
 
 /**