X-Git-Url: http://git.cascardo.eti.br/?a=blobdiff_plain;f=drivers%2Fnet%2Fethernet%2Fibm%2Fibmvnic.c;h=bfe17d9c022df9ac8932c1cef9e4eac94cf23d77;hb=8bf371e6adff29758cc3c57c17df4486513081f8;hp=88f3c85fb04ada575e367042d79f4b8188e12245;hpb=3ef500a53f8c44ca82decf79dd8138fd07c7739d;p=cascardo%2Flinux.git

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 88f3c85fb04a..5f44c5520fbc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
         struct device *dev = &adapter->vdev->dev;
 
         dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-        send_request_unmap(adapter, ltb->map_id);
+        if (!adapter->failover)
+                send_request_unmap(adapter, ltb->map_id);
 }
 
 static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
@@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev)
         for (i = 0; i < adapter->req_rx_queues; i++)
                 napi_disable(&adapter->napi[i]);
 
-        netif_tx_stop_all_queues(netdev);
+        if (!adapter->failover)
+                netif_tx_stop_all_queues(netdev);
 
         if (adapter->bounce_buffer) {
                 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1188,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
         if (!scrq)
                 return NULL;
 
-        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
         memset(scrq->msgs, 0, 4 * PAGE_SIZE);
         if (!scrq->msgs) {
                 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1422,7 +1424,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
                 scrq = adapter->tx_scrq[i];
                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
 
-                if (scrq->irq == NO_IRQ) {
+                if (!scrq->irq) {
                         rc = -EINVAL;
                         dev_err(dev, "Error mapping irq\n");
                         goto req_tx_irq_failed;
@@ -1442,7 +1444,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
         for (i = 0; i < adapter->req_rx_queues; i++) {
                 scrq = adapter->rx_scrq[i];
                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-                if (scrq->irq == NO_IRQ) {
+                if (!scrq->irq) {
                         rc = -EINVAL;
                         dev_err(dev, "Error mapping irq\n");
                         goto req_rx_irq_failed;
@@ -1459,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
         return rc;
 
 req_rx_irq_failed:
-        for (j = 0; j < i; j++)
+        for (j = 0; j < i; j++) {
                 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
                 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+        }
         i = adapter->req_tx_queues;
 req_tx_irq_failed:
-        for (j = 0; j < i; j++)
+        for (j = 0; j < i; j++) {
                 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
                 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+        }
         release_sub_crqs_no_irqs(adapter);
         return rc;
 }
@@ -2777,12 +2781,6 @@ static void handle_control_ras_rsp(union ibmvnic_crq *crq,
         }
 }
 
-static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
-{
-        file->private_data = inode->i_private;
-        return 0;
-}
-
 static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
                           loff_t *ppos)
 {
@@ -2834,7 +2832,7 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
 
 static const struct file_operations trace_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = trace_read,
 };
 
@@ -2884,7 +2882,7 @@ static ssize_t paused_write(struct file *file, const char __user *user_buf,
 
 static const struct file_operations paused_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = paused_read,
         .write = paused_write,
 };
@@ -2932,7 +2930,7 @@ static ssize_t tracing_write(struct file *file, const char __user *user_buf,
 
 static const struct file_operations tracing_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = tracing_read,
         .write = tracing_write,
 };
@@ -2985,7 +2983,7 @@ static ssize_t error_level_write(struct file *file, const char __user *user_buf,
 
 static const struct file_operations error_level_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = error_level_read,
         .write = error_level_write,
 };
@@ -3036,7 +3034,7 @@ static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
 
 static const struct file_operations trace_level_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = trace_level_read,
         .write = trace_level_write,
 };
@@ -3089,7 +3087,7 @@ static ssize_t trace_buff_size_write(struct file *file,
 
 static const struct file_operations trace_size_ops = {
         .owner = THIS_MODULE,
-        .open = ibmvnic_fw_comp_open,
+        .open = simple_open,
         .read = trace_buff_size_read,
         .write = trace_buff_size_write,
 };
@@ -3236,6 +3234,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
         spin_unlock_irqrestore(&adapter->inflight_lock, flags);
 }
 
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+        struct ibmvnic_adapter *adapter = container_of(work,
+                                                       struct ibmvnic_adapter,
+                                                       ibmvnic_xport);
+        struct device *dev = &adapter->vdev->dev;
+        long rc;
+
+        ibmvnic_free_inflight(adapter);
+        release_sub_crqs(adapter);
+        if (adapter->migrated) {
+                rc = ibmvnic_reenable_crq_queue(adapter);
+                if (rc)
+                        dev_err(dev, "Error after enable rc=%ld\n", rc);
+                adapter->migrated = false;
+                rc = ibmvnic_send_crq_init(adapter);
+                if (rc)
+                        dev_err(dev, "Error sending init rc=%ld\n", rc);
+        }
+}
+
 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                                struct ibmvnic_adapter *adapter)
 {
@@ -3271,21 +3290,16 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
                         dev_info(dev, "Re-enabling adapter\n");
                         adapter->migrated = true;
-                        ibmvnic_free_inflight(adapter);
-                        release_sub_crqs(adapter);
-                        rc = ibmvnic_reenable_crq_queue(adapter);
-                        if (rc)
-                                dev_err(dev, "Error after enable rc=%ld\n", rc);
-                        adapter->migrated = false;
-                        rc = ibmvnic_send_crq_init(adapter);
-                        if (rc)
-                                dev_err(dev, "Error sending init rc=%ld\n", rc);
+                        schedule_work(&adapter->ibmvnic_xport);
+                } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
+                        dev_info(dev, "Backing device failover detected\n");
+                        netif_carrier_off(netdev);
+                        adapter->failover = true;
                 } else {
                         /* The adapter lost the connection */
                         dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
                                 gen_crq->cmd);
-                        ibmvnic_free_inflight(adapter);
-                        release_sub_crqs(adapter);
+                        schedule_work(&adapter->ibmvnic_xport);
                 }
                 return;
         case IBMVNIC_CRQ_CMD_RSP:
@@ -3615,8 +3629,18 @@ static void handle_crq_init_rsp(struct work_struct *work)
         struct device *dev = &adapter->vdev->dev;
         struct net_device *netdev = adapter->netdev;
         unsigned long timeout = msecs_to_jiffies(30000);
+        bool restart = false;
         int rc;
 
+        if (adapter->failover) {
+                release_sub_crqs(adapter);
+                if (netif_running(netdev)) {
+                        netif_tx_disable(netdev);
+                        ibmvnic_close(netdev);
+                        restart = true;
+                }
+        }
+
         send_version_xchg(adapter);
         reinit_completion(&adapter->init_done);
         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -3644,6 +3668,18 @@ static void handle_crq_init_rsp(struct work_struct *work)
                 goto task_failed;
 
         netdev->real_num_tx_queues = adapter->req_tx_queues;
+        netdev->mtu = adapter->req_mtu;
+
+        if (adapter->failover) {
+                adapter->failover = false;
+                if (restart) {
+                        rc = ibmvnic_open(netdev);
+                        if (rc)
+                                goto restart_failed;
+                }
+                netif_carrier_on(netdev);
+                return;
+        }
 
         rc = register_netdev(netdev);
         if (rc) {
@@ -3655,6 +3691,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
 
         return;
 
+restart_failed:
+        dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
 register_failed:
         release_sub_crqs(adapter);
 task_failed:
@@ -3692,6 +3730,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         dev_set_drvdata(&dev->dev, netdev);
         adapter->vdev = dev;
         adapter->netdev = netdev;
+        adapter->failover = false;
 
         ether_addr_copy(adapter->mac_addr, mac_addr_p);
         ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -3701,6 +3740,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         SET_NETDEV_DEV(netdev, &dev->dev);
 
         INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+        INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
 
         spin_lock_init(&adapter->stats_lock);
 
@@ -3721,6 +3761,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
                 if (!firmware_has_feature(FW_FEATURE_CMO))
                         dev_err(&dev->dev, "Couldn't map stats buffer\n");
+                rc = -ENOMEM;
                 goto free_crq;
         }
 
@@ -3767,6 +3808,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         }
 
         netdev->real_num_tx_queues = adapter->req_tx_queues;
+        netdev->mtu = adapter->req_mtu;
 
         rc = register_netdev(netdev);
         if (rc) {