+ return STATUS_SUCCESS;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Utility function to fill up information about the datapath in a reply to
+ * userspace.
+ * Assumes that 'gOvsCtrlLock' lock is acquired.
+ *
+ * 'msgIn' supplies the sequence number and port ID that are echoed back in
+ * the reply header; the reply itself is appended to 'nlBuf'.
+ *
+ * Returns STATUS_SUCCESS if the complete reply fit into 'nlBuf', or
+ * STATUS_INVALID_BUFFER_SIZE if appending any part of it failed.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsDpFillInfo(POVS_SWITCH_CONTEXT ovsSwitchContext,
+              POVS_MESSAGE msgIn,
+              PNL_BUFFER nlBuf)
+{
+    BOOLEAN writeOk;
+    OVS_MESSAGE msgOutTmp;
+    OVS_DATAPATH *datapath = &ovsSwitchContext->datapath;
+    PNL_MSG_HDR nlMsg;
+
+    /* The buffer must be fresh and at least large enough for the header. */
+    ASSERT(NlBufAt(nlBuf, 0, 0) != 0 && NlBufRemLen(nlBuf) >= sizeof *msgIn);
+
+    /* Reply header: echo the request's sequence number and port ID. */
+    msgOutTmp.nlMsg.nlmsgType = OVS_WIN_NL_DATAPATH_FAMILY_ID;
+    msgOutTmp.nlMsg.nlmsgFlags = 0; /* XXX: ? */
+    msgOutTmp.nlMsg.nlmsgSeq = msgIn->nlMsg.nlmsgSeq;
+    msgOutTmp.nlMsg.nlmsgPid = msgIn->nlMsg.nlmsgPid;
+
+    msgOutTmp.genlMsg.cmd = OVS_DP_CMD_GET;
+    msgOutTmp.genlMsg.version = nlDatapathFamilyOps.version;
+    msgOutTmp.genlMsg.reserved = 0;
+
+    msgOutTmp.ovsHdr.dp_ifindex = ovsSwitchContext->dpNo;
+
+    /* Header first, then the attributes appended at the tail. */
+    writeOk = NlMsgPutHead(nlBuf, (PCHAR)&msgOutTmp, sizeof msgOutTmp);
+    if (writeOk) {
+        /* There is only one datapath on Windows: the system datapath. */
+        writeOk = NlMsgPutTailString(nlBuf, OVS_DP_ATTR_NAME,
+                                     OVS_SYSTEM_DP_NAME);
+    }
+    if (writeOk) {
+        OVS_DP_STATS dpStats;
+
+        dpStats.n_hit = datapath->hits;
+        dpStats.n_missed = datapath->misses;
+        dpStats.n_lost = datapath->lost;
+        dpStats.n_flows = datapath->nFlows;
+        writeOk = NlMsgPutTailUnspec(nlBuf, OVS_DP_ATTR_STATS,
+                                     (PCHAR)&dpStats, sizeof dpStats);
+    }
+    /* Patch the total message length now that all attributes are in. */
+    nlMsg = (PNL_MSG_HDR)NlBufAt(nlBuf, 0, 0);
+    nlMsg->nlmsgLen = NlBufSize(nlBuf);
+
+    return writeOk ? STATUS_SUCCESS : STATUS_INVALID_BUFFER_SIZE;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Handler for queueing an IRP used for event notification. The IRP is
+ * completed when a port state changes. STATUS_PENDING is returned on
+ * success. User mode keeps one pending IRP outstanding at all times.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsPendEventCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                       UINT32 *replyLen)
+{
+    POVS_OPEN_INSTANCE instance =
+        (POVS_OPEN_INSTANCE)usrParamsCtx->ovsInstance;
+    POVS_MESSAGE msgIn = (POVS_MESSAGE)usrParamsCtx->inputBuffer;
+    OVS_EVENT_POLL pollRequest;
+
+    UNREFERENCED_PARAMETER(replyLen);
+
+    /* Queue the IRP against the datapath named in the request. */
+    pollRequest.dpNo = msgIn->ovsHdr.dp_ifindex;
+    return OvsWaitEventIoctl(usrParamsCtx->irp, instance->fileObject,
+                             &pollRequest, sizeof pollRequest);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Handler for subscribing to (or unsubscribing from) the event queue of the
+ * given open instance.
+ *
+ * The request carries two attributes: OVS_NL_ATTR_MCAST_GRP (the multicast
+ * group, currently ignored) and OVS_NL_ATTR_MCAST_JOIN (presumably non-zero
+ * to join, zero to leave -- confirm against OvsSubscribeEventIoctl()).
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsSubscribeEventCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                            UINT32 *replyLen)
+{
+    NDIS_STATUS status;
+    OVS_EVENT_SUBSCRIBE request;
+    BOOLEAN rc;
+    UINT8 join;
+    PNL_ATTR attrs[2];
+    /* Policy is indexed by attribute type; both attributes are mandatory. */
+    const NL_POLICY policy[] = {
+        [OVS_NL_ATTR_MCAST_GRP] = {.type = NL_A_U32 },
+        [OVS_NL_ATTR_MCAST_JOIN] = {.type = NL_A_U8 },
+    };
+
+    UNREFERENCED_PARAMETER(replyLen);
+
+    POVS_OPEN_INSTANCE instance =
+        (POVS_OPEN_INSTANCE)usrParamsCtx->ovsInstance;
+    POVS_MESSAGE msgIn = (POVS_MESSAGE)usrParamsCtx->inputBuffer;
+
+    /* Attributes start right after the OVS_MESSAGE header, hence the
+     * 'sizeof (*msgIn)' offset. */
+    rc = NlAttrParse(&msgIn->nlMsg, sizeof (*msgIn),
+                     NlMsgAttrsLen((PNL_MSG_HDR)msgIn), policy, attrs, 2);
+    if (!rc) {
+        status = STATUS_INVALID_PARAMETER;
+        goto done;
+    }
+
+    /* XXX Ignore the MC group for now */
+    join = NlAttrGetU8(attrs[OVS_NL_ATTR_MCAST_JOIN]);
+    request.dpNo = msgIn->ovsHdr.dp_ifindex;
+    request.subscribe = join;
+    /* Subscribe to every event type; finer-grained masks are not exposed. */
+    request.mask = OVS_EVENT_MASK_ALL;
+
+    status = OvsSubscribeEventIoctl(instance->fileObject, &request,
+                                    sizeof request);
+done:
+    return status;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Command Handler for 'OVS_DP_CMD_NEW'.
+ *
+ * Creating a datapath is not supported; the common transaction handler
+ * answers this command with a Netlink error (NL_ERROR_EXIST).
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsNewDpCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                   UINT32 *replyLen)
+{
+    NTSTATUS status = HandleDpTransactionCommon(usrParamsCtx, replyLen);
+    return status;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Command Handler for 'OVS_DP_CMD_GET'.
+ *
+ * Services both flavors of the command: the transaction-based form goes
+ * through the common transaction handler, while the dump-based form
+ * (the initial dump-start write as well as the subsequent reads) goes
+ * through the dump path.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsGetDpCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                   UINT32 *replyLen)
+{
+    if (usrParamsCtx->devOp != OVS_TRANSACTION_DEV_OP) {
+        return HandleGetDpDump(usrParamsCtx, replyLen);
+    }
+    return HandleDpTransactionCommon(usrParamsCtx, replyLen);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Function for handling the transaction based 'OVS_DP_CMD_GET' command:
+ * a thin wrapper over the common datapath transaction handler.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+HandleGetDpTransaction(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                       UINT32 *replyLen)
+{
+    NTSTATUS status = HandleDpTransactionCommon(usrParamsCtx, replyLen);
+    return status;
+}
+
+
+/*
+ * --------------------------------------------------------------------------
+ * Function for handling the dump-based 'OVS_DP_CMD_GET' command.
+ *
+ * A write op sets up the dump state; the subsequent read op emits the
+ * (single) datapath record and then frees the dump state, since there is
+ * only one datapath to report.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+HandleGetDpDump(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                UINT32 *replyLen)
+{
+    POVS_MESSAGE msgOut = (POVS_MESSAGE)usrParamsCtx->outputBuffer;
+    POVS_OPEN_INSTANCE instance =
+        (POVS_OPEN_INSTANCE)usrParamsCtx->ovsInstance;
+
+    if (usrParamsCtx->devOp == OVS_WRITE_DEV_OP) {
+        *replyLen = 0;
+        /*
+         * Propagate setup failures (eg. a write request that does not carry
+         * the NLM_F_DUMP flag) instead of silently reporting success.
+         */
+        return OvsSetupDumpStart(usrParamsCtx);
+    } else {
+        NL_BUFFER nlBuf;
+        NTSTATUS status;
+        POVS_MESSAGE msgIn = instance->dumpState.ovsMsg;
+
+        ASSERT(usrParamsCtx->devOp == OVS_READ_DEV_OP);
+
+        /* A read without a preceding dump-start write is a caller bug. */
+        if (instance->dumpState.ovsMsg == NULL) {
+            ASSERT(FALSE);
+            return STATUS_INVALID_DEVICE_STATE;
+        }
+
+        /* Dump state must have been deleted after previous dump operation. */
+        ASSERT(instance->dumpState.index[0] == 0);
+        /* Output buffer has been validated while validating read dev op. */
+        ASSERT(msgOut != NULL && usrParamsCtx->outputLength >= sizeof *msgOut);
+
+        NlBufInit(&nlBuf, usrParamsCtx->outputBuffer,
+                  usrParamsCtx->outputLength);
+
+        OvsAcquireCtrlLock();
+        if (!gOvsSwitchContext) {
+            /* No datapath exists yet; treat this as a dump done. */
+            OvsReleaseCtrlLock();
+            *replyLen = 0;
+            FreeUserDumpState(instance);
+            return STATUS_SUCCESS;
+        }
+        status = OvsDpFillInfo(gOvsSwitchContext, msgIn, &nlBuf);
+        OvsReleaseCtrlLock();
+
+        if (status != STATUS_SUCCESS) {
+            /* The reply did not fit in the output buffer; abandon the dump. */
+            *replyLen = 0;
+            FreeUserDumpState(instance);
+            return status;
+        }
+
+        /* Increment the dump index. */
+        instance->dumpState.index[0] = 1;
+        *replyLen = msgOut->nlMsg.nlmsgLen;
+
+        /* Free up the dump state, since there's no more data to continue. */
+        FreeUserDumpState(instance);
+    }
+
+    return STATUS_SUCCESS;
+}
+
+
+/*
+ * --------------------------------------------------------------------------
+ * Command Handler for 'OVS_DP_CMD_SET': delegates to the common datapath
+ * transaction handler.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsSetDpCmdHandler(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                   UINT32 *replyLen)
+{
+    NTSTATUS status = HandleDpTransactionCommon(usrParamsCtx, replyLen);
+    return status;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Function for handling transaction based 'OVS_DP_CMD_NEW', 'OVS_DP_CMD_GET'
+ * and 'OVS_DP_CMD_SET' commands.
+ *
+ * 'OVS_DP_CMD_NEW' is implemented to keep userspace code happy. Creation of a
+ * new datapath is not supported currently.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+HandleDpTransactionCommon(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+                          UINT32 *replyLen)
+{
+    POVS_MESSAGE msgIn = (POVS_MESSAGE)usrParamsCtx->inputBuffer;
+    POVS_MESSAGE msgOut = (POVS_MESSAGE)usrParamsCtx->outputBuffer;
+    NTSTATUS status = STATUS_SUCCESS;
+    NL_BUFFER nlBuf;
+    NL_ERROR nlError = NL_ERROR_SUCCESS;
+    static const NL_POLICY ovsDatapathSetPolicy[] = {
+        [OVS_DP_ATTR_NAME] = { .type = NL_A_STRING, .maxLen = IFNAMSIZ },
+        [OVS_DP_ATTR_UPCALL_PID] = { .type = NL_A_U32, .optional = TRUE },
+        [OVS_DP_ATTR_USER_FEATURES] = { .type = NL_A_U32, .optional = TRUE },
+    };
+    PNL_ATTR dpAttrs[ARRAY_SIZE(ovsDatapathSetPolicy)];
+
+    /* input buffer has been validated while validating write dev op. */
+    ASSERT(msgIn != NULL && usrParamsCtx->inputLength >= sizeof *msgIn);
+
+    /* Parse any attributes in the request. */
+    if (usrParamsCtx->ovsMsg->genlMsg.cmd == OVS_DP_CMD_SET ||
+        usrParamsCtx->ovsMsg->genlMsg.cmd == OVS_DP_CMD_NEW) {
+        if (!NlAttrParse((PNL_MSG_HDR)msgIn,
+                         NLMSG_HDRLEN + GENL_HDRLEN + OVS_HDRLEN,
+                         NlMsgAttrsLen((PNL_MSG_HDR)msgIn),
+                         ovsDatapathSetPolicy, dpAttrs, ARRAY_SIZE(dpAttrs))) {
+            return STATUS_INVALID_PARAMETER;
+        }
+
+        /*
+         * XXX: Not clear at this stage if there's any role for the
+         * OVS_DP_ATTR_UPCALL_PID and OVS_DP_ATTR_USER_FEATURES attributes
+         * passed from userspace.
+         */
+    } else {
+        RtlZeroMemory(dpAttrs, sizeof dpAttrs);
+    }
+
+    /* Output buffer is optional for OVS_TRANSACTION_DEV_OP. */
+    if (msgOut == NULL || usrParamsCtx->outputLength < sizeof *msgOut) {
+        return STATUS_NDIS_INVALID_LENGTH;
+    }
+    NlBufInit(&nlBuf, usrParamsCtx->outputBuffer, usrParamsCtx->outputLength);
+
+    OvsAcquireCtrlLock();
+    if (dpAttrs[OVS_DP_ATTR_NAME] != NULL) {
+        if (!gOvsSwitchContext &&
+            !OvsCompareString(NlAttrGet(dpAttrs[OVS_DP_ATTR_NAME]),
+                              OVS_SYSTEM_DP_NAME)) {
+            OvsReleaseCtrlLock();
+
+            /* Creation of new datapaths is not supported. */
+            if (usrParamsCtx->ovsMsg->genlMsg.cmd == OVS_DP_CMD_SET) {
+                nlError = NL_ERROR_NOTSUPP;
+                goto cleanup;
+            }
+
+            nlError = NL_ERROR_NODEV;
+            goto cleanup;
+        }
+    } else if (!gOvsSwitchContext ||
+               (UINT32)msgIn->ovsHdr.dp_ifindex != gOvsSwitchContext->dpNo) {
+        /*
+         * No switch context yet, or the caller named a datapath index we do
+         * not have. gOvsSwitchContext may legitimately be NULL here, so it
+         * must be checked before dereferencing 'dpNo'.
+         */
+        OvsReleaseCtrlLock();
+        nlError = NL_ERROR_NODEV;
+        goto cleanup;
+    }
+
+    if (usrParamsCtx->ovsMsg->genlMsg.cmd == OVS_DP_CMD_NEW) {
+        /* The (only) datapath always exists; creation is not supported. */
+        OvsReleaseCtrlLock();
+        nlError = NL_ERROR_EXIST;
+        goto cleanup;
+    }
+
+    if (!gOvsSwitchContext) {
+        /*
+         * The name matched but no switch is attached; without this check,
+         * OvsDpFillInfo() would dereference a NULL context.
+         */
+        OvsReleaseCtrlLock();
+        nlError = NL_ERROR_NODEV;
+        goto cleanup;
+    }
+
+    status = OvsDpFillInfo(gOvsSwitchContext, msgIn, &nlBuf);
+    OvsReleaseCtrlLock();
+
+    if (status != STATUS_SUCCESS) {
+        /* The reply did not fit in the caller-supplied output buffer. */
+        *replyLen = 0;
+        return status;
+    }
+
+    *replyLen = NlBufSize(&nlBuf);
+
+cleanup:
+    if (nlError != NL_ERROR_SUCCESS) {
+        POVS_MESSAGE_ERROR msgError = (POVS_MESSAGE_ERROR)
+            usrParamsCtx->outputBuffer;
+
+        BuildErrorMsg(msgIn, msgError, nlError);
+        *replyLen = msgError->nlMsg.nlmsgLen;
+    }
+
+    return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+OvsSetupDumpStart(POVS_USER_PARAMS_CONTEXT usrParamsCtx)
+{
+    POVS_MESSAGE msgIn = (POVS_MESSAGE)usrParamsCtx->inputBuffer;
+    POVS_OPEN_INSTANCE instance =
+        (POVS_OPEN_INSTANCE)usrParamsCtx->ovsInstance;
+
+    /* Input buffer has been validated while validating the write dev op. */
+    ASSERT(msgIn != NULL && usrParamsCtx->inputLength >= sizeof *msgIn);
+
+    /* Only dump-start writes are meaningful here; reject anything else. */
+    /* XXX: Handle other NLM_F_* flags in the future. */
+    if ((msgIn->nlMsg.nlmsgFlags & NLM_F_DUMP) != NLM_F_DUMP) {
+        return STATUS_INVALID_PARAMETER;
+    }
+
+    /* Discard any stale dump state so it can be set up afresh. */
+    if (instance->dumpState.ovsMsg != NULL) {
+        FreeUserDumpState(instance);
+    }
+
+    return InitUserDumpState(instance, msgIn);
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Populates the Netlink and Generic-Netlink headers of 'msgOut', echoing
+ * the command, version, sequence number and port ID of 'msgIn'.
+ * --------------------------------------------------------------------------
+ */
+static VOID
+BuildMsgOut(POVS_MESSAGE msgIn, POVS_MESSAGE msgOut, UINT16 type,
+            UINT32 length, UINT16 flags)
+{
+    PNL_MSG_HDR nlHdr = &msgOut->nlMsg;
+
+    nlHdr->nlmsgType = type;
+    nlHdr->nlmsgFlags = flags;
+    nlHdr->nlmsgLen = length;
+    /* Echo the request's sequence number and port ID back to the caller. */
+    nlHdr->nlmsgSeq = msgIn->nlMsg.nlmsgSeq;
+    nlHdr->nlmsgPid = msgIn->nlMsg.nlmsgPid;
+
+    msgOut->genlMsg.cmd = msgIn->genlMsg.cmd;
+    msgOut->genlMsg.version = msgIn->genlMsg.version;
+    msgOut->genlMsg.reserved = 0;
+}
+
+/*
+ * XXX: should move out these functions to a Netlink.c or to a OvsMessage.c
+ * or even make them inlined functions in Datapath.h. Can be done after the
+ * first sprint once we have more code to refactor.
+ */
+VOID
+BuildReplyMsgFromMsgIn(POVS_MESSAGE msgIn, POVS_MESSAGE msgOut, UINT16 flags)
+{
+    /* A reply mirrors the request's message type; the length covers exactly
+     * one OVS_MESSAGE. */
+    UINT16 replyType = msgIn->nlMsg.nlmsgType;
+
+    BuildMsgOut(msgIn, msgOut, replyType, sizeof(OVS_MESSAGE), flags);
+}
+
+VOID
+BuildErrorMsg(POVS_MESSAGE msgIn, POVS_MESSAGE_ERROR msgOut, UINT errorCode)
+{
+    /* NLMSG_ERROR replies carry the error code followed by a copy of the
+     * offending request's Netlink header. */
+    BuildMsgOut(msgIn, (POVS_MESSAGE)msgOut, NLMSG_ERROR,
+                sizeof(OVS_MESSAGE_ERROR), 0);
+    msgOut->errorMsg.nlMsg = msgIn->nlMsg;
+    msgOut->errorMsg.error = errorCode;
+}
+
+/*
+ * --------------------------------------------------------------------------
+ * Serializes 'vport' into a Netlink reply in 'outBuffer' (of 'outBufLen'
+ * bytes): header echoed from 'msgIn' with NLM_F_MULTI set, followed by the
+ * port number, type, name, upcall PID and statistics attributes.
+ *
+ * Returns STATUS_SUCCESS on success, or STATUS_INSUFFICIENT_RESOURCES if
+ * any part of the message did not fit into 'outBuffer'.
+ * --------------------------------------------------------------------------
+ */
+static NTSTATUS
+OvsCreateMsgFromVport(POVS_VPORT_ENTRY vport,
+                      POVS_MESSAGE msgIn,
+                      PVOID outBuffer,
+                      UINT32 outBufLen,
+                      int dpIfIndex)
+{
+    NL_BUFFER nlBuffer;
+    OVS_VPORT_FULL_STATS vportStats;
+    BOOLEAN ok;
+    OVS_MESSAGE msgOut;
+    PNL_MSG_HDR nlMsg;
+
+    NlBufInit(&nlBuffer, outBuffer, outBufLen);
+
+    BuildReplyMsgFromMsgIn(msgIn, &msgOut, NLM_F_MULTI);
+    msgOut.ovsHdr.dp_ifindex = dpIfIndex;
+
+    ok = NlMsgPutHead(&nlBuffer, (PCHAR)&msgOut, sizeof msgOut);
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    ok = NlMsgPutTailU32(&nlBuffer, OVS_VPORT_ATTR_PORT_NO, vport->portNo);
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    ok = NlMsgPutTailU32(&nlBuffer, OVS_VPORT_ATTR_TYPE, vport->ovsType);
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    ok = NlMsgPutTailString(&nlBuffer, OVS_VPORT_ATTR_NAME, vport->ovsName);
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    /*
+     * XXX: when we implement OVS_DP_ATTR_USER_FEATURES in datapath,
+     * we'll need to check the OVS_DP_F_VPORT_PIDS flag: if it is set,
+     * it means we have an array of pids, instead of a single pid.
+     * ATM we assume we have one pid only.
+     */
+    ok = NlMsgPutTailU32(&nlBuffer, OVS_VPORT_ATTR_UPCALL_PID,
+                         vport->upcallPid);
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    /*
+     * Stats: zero the whole struct first. It is copied wholesale into the
+     * reply that reaches userspace, so any padding bytes (or fields not
+     * explicitly assigned below) must not leak uninitialized kernel stack
+     * contents.
+     */
+    RtlZeroMemory(&vportStats, sizeof vportStats);
+    vportStats.rxPackets = vport->stats.rxPackets;
+    vportStats.rxBytes = vport->stats.rxBytes;
+    vportStats.txPackets = vport->stats.txPackets;
+    vportStats.txBytes = vport->stats.txBytes;
+    vportStats.rxErrors = vport->errStats.rxErrors;
+    vportStats.txErrors = vport->errStats.txErrors;
+    vportStats.rxDropped = vport->errStats.rxDropped;
+    vportStats.txDropped = vport->errStats.txDropped;
+
+    ok = NlMsgPutTailUnspec(&nlBuffer, OVS_VPORT_ATTR_STATS,
+                            (PCHAR)&vportStats,
+                            sizeof(OVS_VPORT_FULL_STATS));
+    if (!ok) {
+        return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    /*
+     * XXX: when vxlan udp dest port becomes configurable, we will also need
+     * to add vport options
+     */
+
+    /* Patch the total message length now that all attributes are in. */
+    nlMsg = (PNL_MSG_HDR)NlBufAt(&nlBuffer, 0, 0);
+    nlMsg->nlmsgLen = NlBufSize(&nlBuffer);
+
+    return STATUS_SUCCESS;
+}
+
+static NTSTATUS
+OvsGetVportDumpNext(POVS_USER_PARAMS_CONTEXT usrParamsCtx,
+ UINT32 *replyLen)
+{
+ POVS_MESSAGE msgIn;
+ POVS_OPEN_INSTANCE instance =
+ (POVS_OPEN_INSTANCE)usrParamsCtx->ovsInstance;
+ LOCK_STATE_EX lockState;
+ UINT32 i = OVS_MAX_VPORT_ARRAY_SIZE;
+
+ /*
+ * XXX: this function shares some code with other dump command(s).
+ * In the future, we will need to refactor the dump functions
+ */
+
+ ASSERT(usrParamsCtx->devOp == OVS_READ_DEV_OP);
+
+ if (instance->dumpState.ovsMsg == NULL) {
+ ASSERT(FALSE);
+ return STATUS_INVALID_DEVICE_STATE;
+ }
+
+ /* Output buffer has been validated while validating read dev op. */
+ ASSERT(usrParamsCtx->outputBuffer != NULL);
+
+ msgIn = instance->dumpState.ovsMsg;
+
+ OvsAcquireCtrlLock();
+ if (!gOvsSwitchContext) {
+ /* Treat this as a dump done. */
+ OvsReleaseCtrlLock();
+ *replyLen = 0;
+ FreeUserDumpState(instance);
+ return STATUS_SUCCESS;
+ }
+
+ /*
+ * XXX: when we implement OVS_DP_ATTR_USER_FEATURES in datapath,
+ * we'll need to check the OVS_DP_F_VPORT_PIDS flag: if it is set,
+ * it means we have an array of pids, instead of a single pid.
+ * ATM we assume we have one pid only.
+ */
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ NdisAcquireRWLockRead(gOvsSwitchContext->dispatchLock, &lockState,
+ NDIS_RWL_AT_DISPATCH_LEVEL);
+
+ if (gOvsSwitchContext->numVports > 0) {
+ /* inBucket: the bucket, used for lookup */
+ UINT32 inBucket = instance->dumpState.index[0];
+ /* inIndex: index within the given bucket, used for lookup */
+ UINT32 inIndex = instance->dumpState.index[1];
+ /* the bucket to be used for the next dump operation */
+ UINT32 outBucket = 0;
+ /* the index within the outBucket to be used for the next dump */
+ UINT32 outIndex = 0;
+
+ for (i = inBucket; i < OVS_MAX_VPORT_ARRAY_SIZE; i++) {
+ PLIST_ENTRY head, link;
+ head = &(gOvsSwitchContext->portHashArray[i]);
+ POVS_VPORT_ENTRY vport = NULL;
+
+ outIndex = 0;
+ LIST_FORALL(head, link) {
+
+ /*
+ * if one or more dumps were previously done on this same bucket,
+ * inIndex will be > 0, so we'll need to reply with the
+ * inIndex + 1 vport from the bucket.
+ */
+ if (outIndex >= inIndex) {
+ vport = CONTAINING_RECORD(link, OVS_VPORT_ENTRY, portLink);
+
+ if (vport->portNo != OVS_DPPORT_NUMBER_INVALID) {
+ OvsCreateMsgFromVport(vport, msgIn,
+ usrParamsCtx->outputBuffer,
+ usrParamsCtx->outputLength,
+ gOvsSwitchContext->dpNo);
+ ++outIndex;
+ break;
+ } else {
+ vport = NULL;
+ }
+ }
+
+ ++outIndex;
+ }
+
+ if (vport) {
+ break;
+ }
+
+ /*
+ * if no vport was found above, check the next bucket, beginning
+ * with the first (i.e. index 0) elem from within that bucket
+ */
+ inIndex = 0;
+ }
+
+ outBucket = i;
+
+ /* XXX: what about NLMSG_DONE (as msg type)? */
+ instance->dumpState.index[0] = outBucket;
+ instance->dumpState.index[1] = outIndex;
+ }
+
+ NdisReleaseRWLock(gOvsSwitchContext->dispatchLock, &lockState);
+
+ OvsReleaseCtrlLock();
+
+ /* if i < OVS_MAX_VPORT_ARRAY_SIZE => vport was found */
+ if (i < OVS_MAX_VPORT_ARRAY_SIZE) {
+ POVS_MESSAGE msgOut = (POVS_MESSAGE)usrParamsCtx->outputBuffer;
+ *replyLen = msgOut->nlMsg.nlmsgLen;
+ } else {
+ /*
+ * if i >= OVS_MAX_VPORT_ARRAY_SIZE => vport was not found =>
+ * it's dump done
+ */
+ *replyLen = 0;
+ /* Free up the dump state, since there's no more data to continue. */
+ FreeUserDumpState(instance);
+ }
+
+ return STATUS_SUCCESS;