From: Linus Torvalds
Date: Tue, 14 Apr 2015 16:50:27 +0000 (-0700)
Subject: Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
X-Git-Tag: v4.1-rc1~149
X-Git-Url: http://git.cascardo.eti.br/?a=commitdiff_plain;h=d0bbe0dd353af9521e9d8bc5236308c677b6f62a;hp=-c;p=cascardo%2Flinux.git

Merge branch 'for-linus' of git://git./linux/kernel/git/jikos/trivial

Pull trivial tree from Jiri Kosina:
 "Usual trivial tree updates. Nothing outstanding -- mostly printk() and comment fixes and unused identifier removals"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial:
  goldfish: goldfish_tty_probe() is not using 'i' any more
  powerpc: Fix comment in smu.h
  qla2xxx: Fix printks in ql_log message
  lib: correct link to the original source for div64_u64
  si2168, tda10071, m88ds3103: Fix firmware wording
  usb: storage: Fix printk in isd200_log_config()
  qla2xxx: Fix printk in qla25xx_setup_mode
  init/main: fix reset_device comment
  ipwireless: missing assignment
  goldfish: remove unreachable line of code
  coredump: Fix do_coredump() comment
  stacktrace.h: remove duplicate declaration task_struct
  smpboot.h: Remove unused function prototype
  treewide: Fix typo in printk messages
  treewide: Fix typo in printk messages
  mod_devicetable: fix comment for match_flags
---

d0bbe0dd353af9521e9d8bc5236308c677b6f62a
diff --combined arch/powerpc/kernel/prom.c
index b8e15c678960,20ce051312fc..308c5e15676b
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@@ -160,12 -160,6 +160,12 @@@ static struct ibm_pa_feature {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, + /* + * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), + * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP + * which is 0 if the kernel doesn't support TM. + */ + {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, }; static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@@ -652,6 -646,9 +652,6 @@@ void __init early_init_devtree(void *pa if (!early_init_dt_verify(params)) panic("BUG: Failed verifying flat device tree, bad version?"); - /* Setup flat device-tree pointer */ - initial_boot_params = params; - #ifdef CONFIG_PPC_RTAS /* Some machines might need RTAS info for debugging, grab it now. */ of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
@@@ -699,7 -696,10 +699,7 @@@ reserve_crashkernel(); early_reserve_mem(); - /* - * Ensure that total memory size is page-aligned, because otherwise - * mark_bootmem() gets upset. - */ + /* Ensure that total memory size is page-aligned.
*/ limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); memblock_enforce_memory_limit(limit); @@@ -721,7 -721,7 +721,7 @@@ */ of_scan_flat_dt(early_init_dt_scan_cpus, NULL); if (boot_cpuid < 0) { - printk("Failed to indentify boot CPU !\n"); + printk("Failed to identify boot CPU !\n"); BUG(); } diff --combined arch/powerpc/perf/hv-24x7.c index 9445a824819e,23cb66ced793..abeb9ec0d117 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@@ -13,66 -13,16 +13,66 @@@ #define pr_fmt(fmt) "hv-24x7: " fmt #include +#include #include #include +#include + #include #include #include +#include #include "hv-24x7.h" #include "hv-24x7-catalog.h" #include "hv-common.h" +static const char *event_domain_suffix(unsigned domain) +{ + switch (domain) { +#define DOMAIN(n, v, x, c) \ + case HV_PERF_DOMAIN_##n: \ + return "__" #n; +#include "hv-24x7-domains.h" +#undef DOMAIN + default: + WARN(1, "unknown domain %d\n", domain); + return "__UNKNOWN_DOMAIN_SUFFIX"; + } +} + +static bool domain_is_valid(unsigned domain) +{ + switch (domain) { +#define DOMAIN(n, v, x, c) \ + case HV_PERF_DOMAIN_##n: \ + /* fall through */ +#include "hv-24x7-domains.h" +#undef DOMAIN + return true; + default: + return false; + } +} + +static bool is_physical_domain(unsigned domain) +{ + switch (domain) { +#define DOMAIN(n, v, x, c) \ + case HV_PERF_DOMAIN_##n: \ + return c; +#include "hv-24x7-domains.h" +#undef DOMAIN + default: + return false; + } +} + +static bool catalog_entry_domain_is_valid(unsigned domain) +{ + return is_physical_domain(domain); +} + /* * TODO: Merging events: * - Think of the hcall as an interface to a 4d array of counters: @@@ -94,14 -44,13 +94,14 @@@ /* * Example usage: - * perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/' + * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/' */ /* u3 0-6, one of HV_24X7_PERF_DOMAIN */ EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3); /* u16 */ -EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31); +EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31); +EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31); /* u32, see "data_offset" */ EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63); /* u16 */ @@@ -114,8 -63,7 +114,8 @@@ EVENT_DEFINE_RANGE(reserved3, config2 static struct attribute *format_attrs[] = { &format_attr_domain.attr, &format_attr_offset.attr, - &format_attr_starting_index.attr, + &format_attr_core.attr, + &format_attr_vcpu.attr, &format_attr_lpar.attr, NULL, }; @@@ -125,115 -73,8 +125,115 @@@ static struct attribute_group format_gr .attrs = format_attrs, }; +static struct attribute_group event_group = { + .name = "events", + /* .attrs is set in init */ +}; + +static struct attribute_group event_desc_group = { + .name = "event_descs", + /* .attrs is set in init */ +}; + +static struct attribute_group event_long_desc_group = { + .name = "event_long_descs", + /* .attrs is set in init */ +}; + static struct kmem_cache *hv_page_cache; +static char *event_name(struct hv_24x7_event_data *ev, int *len) +{ + *len = be16_to_cpu(ev->event_name_len) - 2; + return (char *)ev->remainder; +} + +static char *event_desc(struct hv_24x7_event_data *ev, int *len) +{ + unsigned nl = be16_to_cpu(ev->event_name_len); + __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2); + *len = be16_to_cpu(*desc_len) - 2; + return (char *)ev->remainder + nl; +} + +static char *event_long_desc(struct hv_24x7_event_data *ev, int *len) +{ + unsigned nl = be16_to_cpu(ev->event_name_len); + __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 
2); + unsigned desc_len = be16_to_cpu(*desc_len_); + __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2); + *len = be16_to_cpu(*long_desc_len) - 2; + return (char *)ev->remainder + nl + desc_len; +} + +static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev, + void *end) +{ + void *start = ev; + + return (start + offsetof(struct hv_24x7_event_data, remainder)) < end; +} + +/* + * Things we don't check: + * - padding for desc, name, and long/detailed desc is required to be '\0' + * bytes. + * + * Return NULL if we pass end, + * Otherwise return the address of the byte just following the event. + */ +static void *event_end(struct hv_24x7_event_data *ev, void *end) +{ + void *start = ev; + __be16 *dl_, *ldl_; + unsigned dl, ldl; + unsigned nl = be16_to_cpu(ev->event_name_len); + + if (nl < 2) { + pr_debug("%s: name length too short: %d", __func__, nl); + return NULL; + } + + if (start + nl > end) { + pr_debug("%s: start=%p + nl=%u > end=%p", + __func__, start, nl, end); + return NULL; + } + + dl_ = (__be16 *)(ev->remainder + nl - 2); + if (!IS_ALIGNED((uintptr_t)dl_, 2)) + pr_warn("desc len not aligned %p", dl_); + dl = be16_to_cpu(*dl_); + if (dl < 2) { + pr_debug("%s: desc len too short: %d", __func__, dl); + return NULL; + } + + if (start + nl + dl > end) { + pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p", + __func__, start, nl, dl, start + nl + dl, end); + return NULL; + } + + ldl_ = (__be16 *)(ev->remainder + nl + dl - 2); + if (!IS_ALIGNED((uintptr_t)ldl_, 2)) + pr_warn("long desc len not aligned %p", ldl_); + ldl = be16_to_cpu(*ldl_); + if (ldl < 2) { + pr_debug("%s: long desc len too short (ldl=%u)", + __func__, ldl); + return NULL; + } + + if (start + nl + dl + ldl > end) { + pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p", + __func__, start, nl, dl, ldl, end); + return NULL; + } + + return start + nl + dl + ldl; +} + static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096, unsigned long version, unsigned long index) @@@ -256,609 -97,6 +256,609 @@@ static unsigned long h_get_24x7_catalog version, index); } +static unsigned core_domains[] = { + HV_PERF_DOMAIN_PHYS_CORE, + HV_PERF_DOMAIN_VCPU_HOME_CORE, + HV_PERF_DOMAIN_VCPU_HOME_CHIP, + HV_PERF_DOMAIN_VCPU_HOME_NODE, + HV_PERF_DOMAIN_VCPU_REMOTE_NODE, +}; +/* chip event data always yeilds a single event, core yeilds multiple */ +#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains) + +static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) +{ + const char *sindex; + const char *lpar; + + if (is_physical_domain(domain)) { + lpar = "0x0"; + sindex = "core"; + } else { + lpar = "?"; + sindex = "vcpu"; + } + + return kasprintf(GFP_KERNEL, + "domain=0x%x,offset=0x%x,%s=?,lpar=%s", + domain, + be16_to_cpu(event->event_counter_offs) + + be16_to_cpu(event->event_group_record_offs), + sindex, + lpar); +} + +/* Avoid trusting fw to NUL terminate strings */ +static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp) +{ + return kasprintf(gfp, "%.*s", max_len, maybe_str); +} + +static ssize_t device_show_string(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *d; + + d = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "%s\n", (char *)d->var); +} + +static struct attribute *device_str_attr_create_(char *name, char *str) +{ + struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL); + + if (!attr) + return NULL; + + attr->var = str; + attr->attr.attr.name = name; + 
attr->attr.attr.mode = 0444; + attr->attr.show = device_show_string; + return &attr->attr.attr; +} + +static struct attribute *device_str_attr_create(char *name, int name_max, + int name_nonce, + char *str, size_t str_max) +{ + char *n; + char *s = memdup_to_str(str, str_max, GFP_KERNEL); + struct attribute *a; + + if (!s) + return NULL; + + if (!name_nonce) + n = kasprintf(GFP_KERNEL, "%.*s", name_max, name); + else + n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name, + name_nonce); + if (!n) + goto out_s; + + a = device_str_attr_create_(n, s); + if (!a) + goto out_n; + + return a; +out_n: + kfree(n); +out_s: + kfree(s); + return NULL; +} + +static void device_str_attr_destroy(struct attribute *attr) +{ + struct dev_ext_attribute *d; + + d = container_of(attr, struct dev_ext_attribute, attr.attr); + kfree(d->var); + kfree(d->attr.attr.name); + kfree(d); +} + +static struct attribute *event_to_attr(unsigned ix, + struct hv_24x7_event_data *event, + unsigned domain, + int nonce) +{ + int event_name_len; + char *ev_name, *a_ev_name, *val; + const char *ev_suffix; + struct attribute *attr; + + if (!domain_is_valid(domain)) { + pr_warn("catalog event %u has invalid domain %u\n", + ix, domain); + return NULL; + } + + val = event_fmt(event, domain); + if (!val) + return NULL; + + ev_suffix = event_domain_suffix(domain); + ev_name = event_name(event, &event_name_len); + if (!nonce) + a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s", + (int)event_name_len, ev_name, ev_suffix); + else + a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d", + (int)event_name_len, ev_name, ev_suffix, nonce); + + + if (!a_ev_name) + goto out_val; + + attr = device_str_attr_create_(a_ev_name, val); + if (!attr) + goto out_name; + + return attr; +out_name: + kfree(a_ev_name); +out_val: + kfree(val); + return NULL; +} + +static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event, + int nonce) +{ + int nl, dl; + char *name = event_name(event, &nl); + char *desc = event_desc(event, &dl); + + /* If there isn't a description, don't create the sysfs file */ + if (!dl) + return NULL; + + return device_str_attr_create(name, nl, nonce, desc, dl); +} + +static struct attribute * +event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce) +{ + int nl, dl; + char *name = event_name(event, &nl); + char *desc = event_long_desc(event, &dl); + + /* If there isn't a description, don't create the sysfs file */ + if (!dl) + return NULL; + + return device_str_attr_create(name, nl, nonce, desc, dl); +} + +static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs, + struct hv_24x7_event_data *event, int nonce) +{ + unsigned i; + + switch (event->domain) { + case HV_PERF_DOMAIN_PHYS_CHIP: + *attrs = event_to_attr(ix, event, event->domain, nonce); + return 1; + case HV_PERF_DOMAIN_PHYS_CORE: + for (i = 0; i < ARRAY_SIZE(core_domains); i++) { + attrs[i] = event_to_attr(ix, event, core_domains[i], + nonce); + if (!attrs[i]) { + pr_warn("catalog event %u: individual attr %u " + "creation failure\n", ix, i); + for (; i; i--) + device_str_attr_destroy(attrs[i - 1]); + return -1; + } + } + return i; + default: + pr_warn("catalog event %u: domain %u is not allowed in the " + "catalog\n", ix, event->domain); + return -1; + } +} + +static size_t event_to_attr_ct(struct hv_24x7_event_data *event) +{ + switch (event->domain) { + case HV_PERF_DOMAIN_PHYS_CHIP: + return 1; + case HV_PERF_DOMAIN_PHYS_CORE: + return ARRAY_SIZE(core_domains); + default: + return 0; + } +} + +static unsigned long vmalloc_to_phys(void *v) +{ + 
struct page *p = vmalloc_to_page(v); + + BUG_ON(!p); + return page_to_phys(p) + offset_in_page(v); +} + +/* */ +struct event_uniq { + struct rb_node node; + const char *name; + int nl; + unsigned ct; + unsigned domain; +}; + +static int memord(const void *d1, size_t s1, const void *d2, size_t s2) +{ + if (s1 < s2) + return 1; + if (s2 > s1) + return -1; + + return memcmp(d1, d2, s1); +} + +static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2, + size_t s2, unsigned d2) +{ + int r = memord(v1, s1, v2, s2); + + if (r) + return r; + if (d1 > d2) + return 1; + if (d2 > d1) + return -1; + return 0; +} + +static int event_uniq_add(struct rb_root *root, const char *name, int nl, + unsigned domain) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + struct event_uniq *data; + + /* Figure out where to put new node */ + while (*new) { + struct event_uniq *it; + int result; + + it = container_of(*new, struct event_uniq, node); + result = ev_uniq_ord(name, nl, domain, it->name, it->nl, + it->domain); + + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else { + it->ct++; + pr_info("found a duplicate event %.*s, ct=%u\n", nl, + name, it->ct); + return it->ct; + } + } + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + *data = (struct event_uniq) { + .name = name, + .nl = nl, + .ct = 0, + .domain = domain, + }; + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); + + /* data->ct */ + return 0; +} + +static void event_uniq_destroy(struct rb_root *root) +{ + /* + * the strings we point to are in the giant block of memory filled by + * the catalog, and are freed separately. + */ + struct event_uniq *pos, *n; + + rbtree_postorder_for_each_entry_safe(pos, n, root, node) + kfree(pos); +} + + +/* + * ensure the event structure's sizes are self consistent and don't cause us to + * read outside of the event + * + * On success, return the event length in bytes. + * Otherwise, return -1 (and print as appropriate). 
+ */ +static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event, + size_t event_idx, + size_t event_data_bytes, + size_t event_entry_count, + size_t offset, void *end) +{ + ssize_t ev_len; + void *ev_end, *calc_ev_end; + + if (offset >= event_data_bytes) + return -1; + + if (event_idx >= event_entry_count) { + pr_devel("catalog event data has %zu bytes of padding after last event\n", + event_data_bytes - offset); + return -1; + } + + if (!event_fixed_portion_is_within(event, end)) { + pr_warn("event %zu fixed portion is not within range\n", + event_idx); + return -1; + } + + ev_len = be16_to_cpu(event->length); + + if (ev_len % 16) + pr_info("event %zu has length %zu not divisible by 16: event=%pK\n", + event_idx, ev_len, event); + + ev_end = (__u8 *)event + ev_len; + if (ev_end > end) { + pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n", + event_idx, ev_len, ev_end, end, + offset); + return -1; + } + + calc_ev_end = event_end(event, end); + if (!calc_ev_end) { + pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n", + event_idx, event_data_bytes, event, end, + offset); + return -1; + } + + if (calc_ev_end > ev_end) { + pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n", + event_idx, event, ev_end, offset, calc_ev_end); + return -1; + } + + return ev_len; +} + +#define MAX_4K (SIZE_MAX / 4096) + +static void create_events_from_catalog(struct attribute ***events_, + struct attribute ***event_descs_, + struct attribute ***event_long_descs_) +{ + unsigned long hret; + size_t catalog_len, catalog_page_len, event_entry_count, + event_data_len, event_data_offs, + event_data_bytes, junk_events, event_idx, event_attr_ct, i, + attr_max, event_idx_last, desc_ct, long_desc_ct; + ssize_t ct, ev_len; + uint32_t catalog_version_num; + struct attribute **events, **event_descs, **event_long_descs; + struct hv_24x7_catalog_page_0 *page_0 = + kmem_cache_alloc(hv_page_cache, GFP_KERNEL); + void *page = page_0; + void *event_data, *end; + struct hv_24x7_event_data *event; + struct rb_root ev_uniq = RB_ROOT; + + if (!page) + goto e_out; + + hret = h_get_24x7_catalog_page(page, 0, 0); + if (hret) + goto e_free; + + catalog_version_num = be64_to_cpu(page_0->version); + catalog_page_len = be32_to_cpu(page_0->length); + + if (MAX_4K < catalog_page_len) { + pr_err("invalid page count: %zu\n", catalog_page_len); + goto e_free; + } + + catalog_len = catalog_page_len * 4096; + + event_entry_count = be16_to_cpu(page_0->event_entry_count); + event_data_offs = be16_to_cpu(page_0->event_data_offs); + event_data_len = be16_to_cpu(page_0->event_data_len); + + pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n", + (size_t)catalog_version_num, catalog_len, + event_entry_count, event_data_offs, event_data_len); + + if ((MAX_4K < event_data_len) + || (MAX_4K < event_data_offs) + || (MAX_4K - event_data_offs < event_data_len)) { + pr_err("invalid event data offs %zu and/or len %zu\n", + event_data_offs, event_data_len); + goto e_free; + } + + if ((event_data_offs + event_data_len) > catalog_page_len) { + pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n", + event_data_offs, + event_data_offs + event_data_len, + catalog_page_len); + goto e_free; + } + + if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) { + pr_err("event_entry_count %zu is invalid\n", + event_entry_count); + goto e_free; + } + + event_data_bytes = event_data_len * 4096; + + /* 
+ * event data can span several pages, events can cross between these + * pages. Use vmalloc to make this easier. + */ + event_data = vmalloc(event_data_bytes); + if (!event_data) { + pr_err("could not allocate event data\n"); + goto e_free; + } + + end = event_data + event_data_bytes; + + /* + * using vmalloc_to_phys() like this only works if PAGE_SIZE is + * divisible by 4096 + */ + BUILD_BUG_ON(PAGE_SIZE % 4096); + + for (i = 0; i < event_data_len; i++) { + hret = h_get_24x7_catalog_page_( + vmalloc_to_phys(event_data + i * 4096), + catalog_version_num, + i + event_data_offs); + if (hret) { + pr_err("failed to get event data in page %zu\n", + i + event_data_offs); + goto e_event_data; + } + } + + /* + * scan the catalog to determine the number of attributes we need, and + * verify it at the same time. + */ + for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0; + ; + event_idx++, event = (void *)event + ev_len) { + size_t offset = (void *)event - (void *)event_data; + char *name; + int nl; + + ev_len = catalog_event_len_validate(event, event_idx, + event_data_bytes, + event_entry_count, + offset, end); + if (ev_len < 0) + break; + + name = event_name(event, &nl); + + if (event->event_group_record_len == 0) { + pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n", + event_idx, nl, name); + junk_events++; + continue; + } + + if (!catalog_entry_domain_is_valid(event->domain)) { + pr_info("event %zu (%.*s) has invalid domain %d\n", + event_idx, nl, name, event->domain); + junk_events++; + continue; + } + + attr_max += event_to_attr_ct(event); + } + + event_idx_last = event_idx; + if (event_idx_last != event_entry_count) + pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n", + event_idx_last, event_entry_count, junk_events); + + events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL); + if (!events) + goto e_event_data; + + event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs), + GFP_KERNEL); + if (!event_descs) + goto e_event_attrs; + + event_long_descs = kmalloc_array(event_idx + 1, + sizeof(*event_long_descs), GFP_KERNEL); + if (!event_long_descs) + goto e_event_descs; + + /* Iterate over the catalog filling in the attribute vector */ + for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0, + event = event_data, event_idx = 0; + event_idx < event_idx_last; + event_idx++, ev_len = be16_to_cpu(event->length), + event = (void *)event + ev_len) { + char *name; + int nl; + int nonce; + /* + * these are the only "bad" events that are intermixed and that + * we can ignore without issue. 
make sure to skip them here + */ + if (event->event_group_record_len == 0) + continue; + if (!catalog_entry_domain_is_valid(event->domain)) + continue; + + name = event_name(event, &nl); + nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); + ct = event_data_to_attrs(event_idx, events + event_attr_ct, + event, nonce); + if (ct <= 0) { + pr_warn("event %zu (%.*s) creation failure, skipping\n", + event_idx, nl, name); + junk_events++; + } else { + event_attr_ct += ct; + event_descs[desc_ct] = event_to_desc_attr(event, nonce); + if (event_descs[desc_ct]) + desc_ct++; + event_long_descs[long_desc_ct] = + event_to_long_desc_attr(event, nonce); + if (event_long_descs[long_desc_ct]) + long_desc_ct++; + } + } + + pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n", + event_idx, event_attr_ct, junk_events, desc_ct); + + events[event_attr_ct] = NULL; + event_descs[desc_ct] = NULL; + event_long_descs[long_desc_ct] = NULL; + + event_uniq_destroy(&ev_uniq); + vfree(event_data); + kmem_cache_free(hv_page_cache, page); + + *events_ = events; + *event_descs_ = event_descs; + *event_long_descs_ = event_long_descs; + return; + +e_event_descs: + kfree(event_descs); +e_event_attrs: + kfree(events); +e_event_data: + vfree(event_data); +e_free: + kmem_cache_free(hv_page_cache, page); +e_out: + *events_ = NULL; + *event_descs_ = NULL; + *event_long_descs_ = NULL; +} + static ssize_t catalog_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t offset, size_t count) @@@ -939,7 -177,7 +939,7 @@@ static ssize_t _name##_show(struct devi } \ ret = sprintf(buf, _fmt, _expr); \ e_free: \ - kfree(page); \ + kmem_cache_free(hv_page_cache, page); \ return ret; \ } \ static DEVICE_ATTR_RO(_name) @@@ -969,21 -207,21 +969,21 @@@ static struct attribute_group if_group static const struct attribute_group *attr_groups[] = { &format_group, + &event_group, + &event_desc_group, + &event_long_desc_group, &if_group, NULL, }; -static bool is_physical_domain(int domain) -{ - return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP || - domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE; -} +DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096); +DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096); static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, u16 lpar, u64 *res, bool success_expected) { - unsigned long ret = -ENOMEM; + unsigned long ret; /* * request_buffer and result_buffer are not required to be 4k aligned, @@@ -1005,11 -243,13 +1005,11 @@@ BUILD_BUG_ON(sizeof(*request_buffer) > 4096); BUILD_BUG_ON(sizeof(*result_buffer) > 4096); - request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); - if (!request_buffer) - goto out; + request_buffer = (void *)get_cpu_var(hv_24x7_reqb); + result_buffer = (void *)get_cpu_var(hv_24x7_resb); - result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); - if (!result_buffer) - goto out_free_request_buffer; + memset(request_buffer, 0, 4096); + memset(result_buffer, 0, 4096); *request_buffer = (struct reqb) { .buf = { @@@ -1038,11 -278,15 +1038,11 @@@ domain, offset, ix, lpar, ret, ret, result_buffer->buf.detailed_rc, result_buffer->buf.failing_request_ix); - goto out_free_result_buffer; + goto out; } *res = be64_to_cpu(result_buffer->result); -out_free_result_buffer: - kfree(result_buffer); -out_free_request_buffer: - kfree(request_buffer); out: return ret; } @@@ -1050,17 -294,9 +1050,17 @@@ static unsigned long event_24x7_request(struct perf_event *event, u64 *res, bool success_expected) { 
+ u16 idx; + unsigned domain = event_get_domain(event); + + if (is_physical_domain(domain)) + idx = event_get_core(event); + else + idx = event_get_vcpu(event); + return single_24x7_request(event_get_domain(event), event_get_offset(event), - event_get_starting_index(event), + idx, event_get_lpar(event), res, success_expected); @@@ -1123,10 -359,10 +1123,10 @@@ static int h_24x7_event_init(struct per return -EIO; } - /* PHYSICAL domains & other lpars require extra capabilities */ + /* Physical domains & other lpars require extra capabilities */ if (!caps.collect_privileged && (is_physical_domain(domain) || (event_get_lpar(event) != event_get_lpar_max()))) { - pr_devel("hv permisions disallow: is_physical_domain:%d, lpar=0x%llx\n", + pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n", is_physical_domain(domain), event_get_lpar(event)); return -EACCES; @@@ -1219,10 -455,6 +1219,10 @@@ static int hv_24x7_init(void /* sampling not supported */ h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + create_events_from_catalog(&event_group.attrs, + &event_desc_group.attrs, + &event_long_desc_group.attrs); + r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); if (r) return r; diff --combined drivers/media/dvb-frontends/m88ds3103.c index ba4ee0b48834,fdafbeb6ed8c..d3d928e1c0ce --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@@ -1,5 -1,5 +1,5 @@@ /* - * Montage M88DS3103 demodulator driver + * Montage M88DS3103/M88RS6000 demodulator driver * * Copyright (C) 2013 Antti Palosaari * @@@ -162,7 -162,7 +162,7 @@@ static int m88ds3103_wr_reg_val_tab(str dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len); - if (tab_len > 83) { + if (tab_len > 86) { ret = -EINVAL; goto err; } @@@ -245,9 -245,9 +245,9 @@@ static int m88ds3103_set_frontend(struc struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, len; const struct m88ds3103_reg_val *init; - u8 u8tmp, u8tmp1, u8tmp2; - u8 buf[2]; - u16 u16tmp, divide_ratio; + u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */ + u8 buf[3]; + u16 u16tmp, divide_ratio = 0; u32 tuner_frequency, target_mclk; s32 s32tmp; @@@ -262,22 -262,6 +262,22 @@@ goto err; } + /* reset */ + ret = m88ds3103_wr_reg(priv, 0x07, 0x80); + if (ret) + goto err; + + ret = m88ds3103_wr_reg(priv, 0x07, 0x00); + if (ret) + goto err; + + /* Disable demod clock path */ + if (priv->chip_id == M88RS6000_CHIP_ID) { + ret = m88ds3103_wr_reg(priv, 0x06, 0xe0); + if (ret) + goto err; + } + /* program tuner */ if (fe->ops.tuner_ops.set_params) { ret = fe->ops.tuner_ops.set_params(fe); @@@ -298,44 -282,49 +298,44 @@@ tuner_frequency = c->frequency; } - /* reset */ - ret = m88ds3103_wr_reg(priv, 0x07, 0x80); - if (ret) - goto err; - - ret = m88ds3103_wr_reg(priv, 0x07, 0x00); - if (ret) - goto err; - - ret = m88ds3103_wr_reg(priv, 0xb2, 0x01); - if (ret) - goto err; + /* select M88RS6000 demod main mclk and ts mclk from tuner die. 
*/ + if (priv->chip_id == M88RS6000_CHIP_ID) { + if (c->symbol_rate > 45010000) + priv->mclk_khz = 110250; + else + priv->mclk_khz = 96000; - ret = m88ds3103_wr_reg(priv, 0x00, 0x01); - if (ret) - goto err; + if (c->delivery_system == SYS_DVBS) + target_mclk = 96000; + else + target_mclk = 144000; - switch (c->delivery_system) { - case SYS_DVBS: - len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals); - init = m88ds3103_dvbs_init_reg_vals; - target_mclk = 96000; - break; - case SYS_DVBS2: - len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals); - init = m88ds3103_dvbs2_init_reg_vals; + /* Enable demod clock path */ + ret = m88ds3103_wr_reg(priv, 0x06, 0x00); + if (ret) + goto err; + usleep_range(10000, 20000); + } else { + /* set M88DS3103 mclk and ts mclk. */ + priv->mclk_khz = 96000; switch (priv->cfg->ts_mode) { case M88DS3103_TS_SERIAL: case M88DS3103_TS_SERIAL_D7: - if (c->symbol_rate < 18000000) - target_mclk = 96000; - else - target_mclk = 144000; + target_mclk = priv->cfg->ts_clk; break; case M88DS3103_TS_PARALLEL: case M88DS3103_TS_CI: - if (c->symbol_rate < 18000000) + if (c->delivery_system == SYS_DVBS) target_mclk = 96000; - else if (c->symbol_rate < 28000000) - target_mclk = 144000; - else - target_mclk = 192000; + else { + if (c->symbol_rate < 18000000) + target_mclk = 96000; + else if (c->symbol_rate < 28000000) + target_mclk = 144000; + else + target_mclk = 192000; + } break; default: dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", @@@ -343,55 -332,6 +343,55 @@@ ret = -EINVAL; goto err; } + + switch (target_mclk) { + case 96000: + u8tmp1 = 0x02; /* 0b10 */ + u8tmp2 = 0x01; /* 0b01 */ + break; + case 144000: + u8tmp1 = 0x00; /* 0b00 */ + u8tmp2 = 0x01; /* 0b01 */ + break; + case 192000: + u8tmp1 = 0x03; /* 0b11 */ + u8tmp2 = 0x00; /* 0b00 */ + break; + } + ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0); + if (ret) + goto err; + ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0); + if (ret) + goto err; + } + + ret = m88ds3103_wr_reg(priv, 0xb2, 0x01); + if (ret) + goto err; + + ret = m88ds3103_wr_reg(priv, 0x00, 0x01); + if (ret) + goto err; + + switch (c->delivery_system) { + case SYS_DVBS: + if (priv->chip_id == M88RS6000_CHIP_ID) { + len = ARRAY_SIZE(m88rs6000_dvbs_init_reg_vals); + init = m88rs6000_dvbs_init_reg_vals; + } else { + len = ARRAY_SIZE(m88ds3103_dvbs_init_reg_vals); + init = m88ds3103_dvbs_init_reg_vals; + } + break; + case SYS_DVBS2: + if (priv->chip_id == M88RS6000_CHIP_ID) { + len = ARRAY_SIZE(m88rs6000_dvbs2_init_reg_vals); + init = m88rs6000_dvbs2_init_reg_vals; + } else { + len = ARRAY_SIZE(m88ds3103_dvbs2_init_reg_vals); + init = m88ds3103_dvbs2_init_reg_vals; + } break; default: dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n", @@@ -407,30 -347,7 +407,30 @@@ goto err; } - u8tmp1 = 0; /* silence compiler warning */ + if (priv->chip_id == M88RS6000_CHIP_ID) { + if ((c->delivery_system == SYS_DVBS2) + && ((c->symbol_rate / 1000) <= 5000)) { + ret = m88ds3103_wr_reg(priv, 0xc0, 0x04); + if (ret) + goto err; + buf[0] = 0x09; + buf[1] = 0x22; + buf[2] = 0x88; + ret = m88ds3103_wr_regs(priv, 0x8a, buf, 3); + if (ret) + goto err; + } + ret = m88ds3103_wr_reg_mask(priv, 0x9d, 0x08, 0x08); + if (ret) + goto err; + ret = m88ds3103_wr_reg(priv, 0xf1, 0x01); + if (ret) + goto err; + ret = m88ds3103_wr_reg_mask(priv, 0x30, 0x80, 0x80); + if (ret) + goto err; + } + switch (priv->cfg->ts_mode) { case M88DS3103_TS_SERIAL: u8tmp1 = 0x00; @@@ -466,15 -383,16 +466,15 @@@ ret = m88ds3103_wr_reg_mask(priv, 0x29, u8tmp1, 0x20); if (ret) goto err; - } - - if 
(priv->cfg->ts_clk) { - divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk); - u8tmp1 = divide_ratio / 2; - u8tmp2 = DIV_ROUND_UP(divide_ratio, 2); - } else { - divide_ratio = 0; u8tmp1 = 0; u8tmp2 = 0; + break; + default: + if (priv->cfg->ts_clk) { + divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk); + u8tmp1 = divide_ratio / 2; + u8tmp2 = DIV_ROUND_UP(divide_ratio, 2); + } } dev_dbg(&priv->i2c->dev, @@@ -502,6 -420,29 +502,6 @@@ if (ret) goto err; - switch (target_mclk) { - case 96000: - u8tmp1 = 0x02; /* 0b10 */ - u8tmp2 = 0x01; /* 0b01 */ - break; - case 144000: - u8tmp1 = 0x00; /* 0b00 */ - u8tmp2 = 0x01; /* 0b01 */ - break; - case 192000: - u8tmp1 = 0x03; /* 0b11 */ - u8tmp2 = 0x00; /* 0b00 */ - break; - } - - ret = m88ds3103_wr_reg_mask(priv, 0x22, u8tmp1 << 6, 0xc0); - if (ret) - goto err; - - ret = m88ds3103_wr_reg_mask(priv, 0x24, u8tmp2 << 6, 0xc0); - if (ret) - goto err; - if (c->symbol_rate <= 3000000) u8tmp = 0x20; else if (c->symbol_rate <= 10000000) @@@ -525,7 -466,7 +525,7 @@@ if (ret) goto err; - u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, M88DS3103_MCLK_KHZ / 2); + u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, priv->mclk_khz / 2); buf[0] = (u16tmp >> 0) & 0xff; buf[1] = (u16tmp >> 8) & 0xff; ret = m88ds3103_wr_regs(priv, 0x61, buf, 2); @@@ -548,7 -489,7 +548,7 @@@ (tuner_frequency - c->frequency)); s32tmp = 0x10000 * (tuner_frequency - c->frequency); - s32tmp = DIV_ROUND_CLOSEST(s32tmp, M88DS3103_MCLK_KHZ); + s32tmp = DIV_ROUND_CLOSEST(s32tmp, priv->mclk_khz); if (s32tmp < 0) s32tmp += 0x10000; @@@ -579,7 -520,7 +579,7 @@@ static int m88ds3103_init(struct dvb_fr struct m88ds3103_priv *priv = fe->demodulator_priv; int ret, len, remaining; const struct firmware *fw = NULL; - u8 *fw_file = M88DS3103_FIRMWARE; + u8 *fw_file; u8 u8tmp; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); @@@ -600,6 -541,15 +600,6 @@@ if (ret) goto err; - /* reset */ - ret = m88ds3103_wr_reg(priv, 0x07, 0x60); - if (ret) - goto err; - - ret = m88ds3103_wr_reg(priv, 0x07, 0x00); - if (ret) - goto err; - /* firmware status */ ret = m88ds3103_rd_reg(priv, 0xb9, &u8tmp); if (ret) @@@ -610,27 -560,14 +610,27 @@@ if (u8tmp) goto skip_fw_download; + /* global reset, global diseqc reset, golbal fec reset */ + ret = m88ds3103_wr_reg(priv, 0x07, 0xe0); + if (ret) + goto err; + + ret = m88ds3103_wr_reg(priv, 0x07, 0x00); + if (ret) + goto err; + /* cold state - try to download firmware */ dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state\n", KBUILD_MODNAME, m88ds3103_ops.info.name); + if (priv->chip_id == M88RS6000_CHIP_ID) + fw_file = M88RS6000_FIRMWARE; + else + fw_file = M88DS3103_FIRMWARE; /* request the firmware, this will block and timeout */ ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent); if (ret) { - dev_err(&priv->i2c->dev, "%s: firmare file '%s' not found\n", + dev_err(&priv->i2c->dev, "%s: firmware file '%s' not found\n", KBUILD_MODNAME, fw_file); goto err; } @@@ -640,7 -577,7 +640,7 @@@ ret = m88ds3103_wr_reg(priv, 0xb2, 0x01); if (ret) - goto err; + goto error_fw_release; for (remaining = fw->size; remaining > 0; remaining -= (priv->cfg->i2c_wr_max - 1)) { @@@ -654,13 -591,13 +654,13 @@@ dev_err(&priv->i2c->dev, "%s: firmware download failed=%d\n", KBUILD_MODNAME, ret); - goto err; + goto error_fw_release; } } ret = m88ds3103_wr_reg(priv, 0xb2, 0x00); if (ret) - goto err; + goto error_fw_release; release_firmware(fw); fw = NULL; @@@ -686,10 -623,10 +686,10 @@@ skip_fw_download priv->warm = true; return 0; -err: - if (fw) - 
release_firmware(fw); +error_fw_release: + release_firmware(fw); +err: dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret); return ret; } @@@ -698,18 -635,13 +698,18 @@@ static int m88ds3103_sleep(struct dvb_f { struct m88ds3103_priv *priv = fe->demodulator_priv; int ret; + u8 u8tmp; dev_dbg(&priv->i2c->dev, "%s:\n", __func__); priv->delivery_system = SYS_UNDEFINED; /* TS Hi-Z */ - ret = m88ds3103_wr_reg_mask(priv, 0x27, 0x00, 0x01); + if (priv->chip_id == M88RS6000_CHIP_ID) + u8tmp = 0x29; + else + u8tmp = 0x27; + ret = m88ds3103_wr_reg_mask(priv, u8tmp, 0x00, 0x01); if (ret) goto err; @@@ -898,7 -830,7 +898,7 @@@ static int m88ds3103_get_frontend(struc goto err; c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) * - M88DS3103_MCLK_KHZ * 1000 / 0x10000; + priv->mclk_khz * 1000 / 0x10000; return 0; err: @@@ -1378,22 -1310,18 +1378,22 @@@ struct dvb_frontend *m88ds3103_attach(c priv->i2c = i2c; mutex_init(&priv->i2c_mutex); - ret = m88ds3103_rd_reg(priv, 0x01, &chip_id); + /* 0x00: chip id[6:0], 0x01: chip ver[7:0], 0x02: chip ver[15:8] */ + ret = m88ds3103_rd_reg(priv, 0x00, &chip_id); if (ret) goto err; - dev_dbg(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id); + chip_id >>= 1; + dev_info(&priv->i2c->dev, "%s: chip_id=%02x\n", __func__, chip_id); switch (chip_id) { - case 0xd0: + case M88RS6000_CHIP_ID: + case M88DS3103_CHIP_ID: break; default: goto err; } + priv->chip_id = chip_id; switch (priv->cfg->clock_out) { case M88DS3103_CLOCK_OUT_DISABLED: @@@ -1409,11 -1337,6 +1409,11 @@@ goto err; } + /* 0x29 register is defined differently for m88rs6000. */ + /* set internal tuner address to 0x21 */ + if (chip_id == M88RS6000_CHIP_ID) + u8tmp = 0x00; + ret = m88ds3103_wr_reg(priv, 0x29, u8tmp); if (ret) goto err; @@@ -1441,9 -1364,6 +1441,9 @@@ /* create dvb_frontend */ memcpy(&priv->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops)); + if (priv->chip_id == M88RS6000_CHIP_ID) + strncpy(priv->fe.ops.info.name, + "Montage M88RS6000", sizeof(priv->fe.ops.info.name)); priv->fe.demodulator_priv = priv; return &priv->fe; @@@ -1503,4 -1423,3 +1503,4 @@@ MODULE_AUTHOR("Antti Palosaari +#include #include #include #include @@@ -178,6 -177,65 +178,6 @@@ static int mmc_decode_csd(struct mmc_ca return 0; } -/* - * Read extended CSD. - */ -static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) -{ - int err; - u8 *ext_csd; - - BUG_ON(!card); - BUG_ON(!new_ext_csd); - - *new_ext_csd = NULL; - - if (card->csd.mmca_vsn < CSD_SPEC_VER_4) - return 0; - - /* - * As the ext_csd is so large and mostly unused, we don't store the - * raw block in mmc_card. - */ - ext_csd = kmalloc(512, GFP_KERNEL); - if (!ext_csd) { - pr_err("%s: could not allocate a buffer to " - "receive the ext_csd.\n", mmc_hostname(card->host)); - return -ENOMEM; - } - - err = mmc_send_ext_csd(card, ext_csd); - if (err) { - kfree(ext_csd); - *new_ext_csd = NULL; - - /* If the host or the card can't do the switch, - * fail more gracefully. */ - if ((err != -EINVAL) - && (err != -ENOSYS) - && (err != -EFAULT)) - return err; - - /* - * High capacity cards should have this "magic" size - * stored in their CSD. - */ - if (card->csd.capacity == (4096 * 512)) { - pr_err("%s: unable to read EXT_CSD " - "on a possible high capacity card. 
" - "Card will be ignored.\n", - mmc_hostname(card->host)); - } else { - pr_warn("%s: unable to read EXT_CSD, performance might suffer\n", - mmc_hostname(card->host)); - err = 0; - } - } else - *new_ext_csd = ext_csd; - - return err; -} - static void mmc_select_card_type(struct mmc_card *card) { struct mmc_host *host = card->host; @@@ -333,12 -391,15 +333,12 @@@ static void mmc_manage_gp_partitions(st /* * Decode extended CSD. */ -static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) +static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) { int err = 0, idx; unsigned int part_size; - - BUG_ON(!card); - - if (!ext_csd) - return 0; + struct device_node *np; + bool broken_hpi = false; /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE]; @@@ -352,11 -413,6 +352,11 @@@ } } + np = mmc_of_find_child_device(card->host, 0); + if (np && of_device_is_compatible(np, "mmc-card")) + broken_hpi = of_property_read_bool(np, "broken-hpi"); + of_node_put(np); + /* * The EXT_CSD format is meant to be forward compatible. As long * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV @@@ -491,18 -547,16 +491,18 @@@ /* check whether the eMMC card supports BKOPS */ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { card->ext_csd.bkops = 1; - card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN]; + card->ext_csd.man_bkops_en = + (ext_csd[EXT_CSD_BKOPS_EN] & + EXT_CSD_MANUAL_BKOPS_MASK); card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; - if (!card->ext_csd.bkops_en) - pr_info("%s: BKOPS_EN bit is not set\n", + if (!card->ext_csd.man_bkops_en) + pr_info("%s: MAN_BKOPS_EN bit is not set\n", mmc_hostname(card->host)); } /* check whether the eMMC card supports HPI */ - if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { + if (!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) { card->ext_csd.hpi = 1; if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION; @@@ -574,56 -628,16 +574,56 @@@ card->ext_csd.data_sector_size = 512; } + /* eMMC v5 or later */ + if (card->ext_csd.rev >= 7) { + memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], + MMC_FIRMWARE_LEN); + card->ext_csd.ffu_capable = + (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) && + !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1); + } out: return err; } -static inline void mmc_free_ext_csd(u8 *ext_csd) +static int mmc_read_ext_csd(struct mmc_card *card) { + u8 *ext_csd; + int err; + + if (!mmc_can_ext_csd(card)) + return 0; + + err = mmc_get_ext_csd(card, &ext_csd); + if (err) { + /* If the host or the card can't do the switch, + * fail more gracefully. */ + if ((err != -EINVAL) + && (err != -ENOSYS) + && (err != -EFAULT)) + return err; + + /* + * High capacity cards should have this "magic" size + * stored in their CSD. + */ + if (card->csd.capacity == (4096 * 512)) { + pr_err("%s: unable to read EXT_CSD on a possible high capacity card. 
Card will be ignored.\n", + mmc_hostname(card->host)); + } else { + pr_warn("%s: unable to read EXT_CSD, performance might suffer\n", + mmc_hostname(card->host)); + err = 0; + } + + return err; + } + + err = mmc_decode_ext_csd(card, ext_csd); kfree(ext_csd); + return err; } - static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) { u8 *bw_ext_csd; @@@ -633,8 -647,11 +633,8 @@@ return 0; err = mmc_get_ext_csd(card, &bw_ext_csd); - - if (err || bw_ext_csd == NULL) { - err = -EINVAL; - goto out; - } + if (err) + return err; /* only compare read only fields */ err = !((card->ext_csd.raw_partition_support == @@@ -693,7 -710,8 +693,7 @@@ if (err) err = -EINVAL; -out: - mmc_free_ext_csd(bw_ext_csd); + kfree(bw_ext_csd); return err; } @@@ -704,7 -722,7 +704,7 @@@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n" MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9); MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9); -MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev); +MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable); MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); @@@ -717,22 -735,6 +717,22 @@@ MMC_DEV_ATTR(enhanced_area_size, "%u\n" MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); +static ssize_t mmc_fwrev_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mmc_card *card = mmc_dev_to_card(dev); + + if (card->ext_csd.rev < 7) { + return sprintf(buf, "0x%x\n", card->cid.fwrev); + } else { + return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN, + card->ext_csd.fwrev); + } +} + +static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL); + static struct attribute *mmc_std_attrs[] = { &dev_attr_cid.attr, &dev_attr_csd.attr, @@@ -740,7 -742,6 +740,7 @@@ &dev_attr_erase_size.attr, &dev_attr_preferred_erase_size.attr, &dev_attr_fwrev.attr, + &dev_attr_ffu_capable.attr, &dev_attr_hwrev.attr, &dev_attr_manfid.attr, &dev_attr_name.attr, @@@ -773,6 -774,14 +773,6 @@@ static int __mmc_select_powerclass(stru unsigned int pwrclass_val = 0; int err = 0; - /* Power class selection is supported for versions >= 4.0 */ - if (card->csd.mmca_vsn < CSD_SPEC_VER_4) - return 0; - - /* Power class values are defined only for 4/8 bit bus */ - if (bus_width == EXT_CSD_BUS_WIDTH_1) - return 0; - switch (1 << host->ios.vdd) { case MMC_VDD_165_195: if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) @@@ -835,7 -844,7 +835,7 @@@ static int mmc_select_powerclass(struc int err, ddr; /* Power class selection is supported for versions >= 4.0 */ - if (card->csd.mmca_vsn < CSD_SPEC_VER_4) + if (!mmc_can_ext_csd(card)) return 0; bus_width = host->ios.bus_width; @@@ -896,7 -905,7 +896,7 @@@ static int mmc_select_bus_width(struct unsigned idx, bus_width = 0; int err = 0; - if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) && + if (!mmc_can_ext_csd(card) || !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) return 0; @@@ -989,7 -998,7 +989,7 @@@ static int mmc_select_hs_ddr(struct mmc ext_csd_bits, card->ext_csd.generic_cmd6_time); if (err) { - pr_warn("%s: switch to bus width %d ddr failed\n", + pr_err("%s: switch to bus width %d ddr failed\n", mmc_hostname(host), 1 << bus_width); return err; } @@@ -1060,7 -1069,7 +1060,7 @@@ static int mmc_select_hs400(struct mmc_ card->ext_csd.generic_cmd6_time, true, true, true); if (err) { 
- pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n", + pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", mmc_hostname(host), err); return err; } @@@ -1070,7 -1079,7 +1070,7 @@@ EXT_CSD_DDR_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time); if (err) { - pr_warn("%s: switch to bus width for hs400 failed, err:%d\n", + pr_err("%s: switch to bus width for hs400 failed, err:%d\n", mmc_hostname(host), err); return err; } @@@ -1080,7 -1089,7 +1080,7 @@@ card->ext_csd.generic_cmd6_time, true, true, true); if (err) { - pr_warn("%s: switch to hs400 failed, err:%d\n", + pr_err("%s: switch to hs400 failed, err:%d\n", mmc_hostname(host), err); return err; } @@@ -1137,7 -1146,8 +1137,7 @@@ static int mmc_select_timing(struct mmc { int err = 0; - if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 && - card->ext_csd.hs_max_dtr == 0)) + if (!mmc_can_ext_csd(card)) goto bus_speed; if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) @@@ -1165,6 -1175,38 +1165,6 @@@ bus_speed return err; } -const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = { - 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, - 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, - 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, - 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, - 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, - 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, - 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, - 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, -}; -EXPORT_SYMBOL(tuning_blk_pattern_4bit); - -const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = { - 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, - 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, - 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, - 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, - 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, - 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, - 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, - 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, - 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, - 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, - 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, - 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, - 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, - 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, - 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, - 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, -}; -EXPORT_SYMBOL(tuning_blk_pattern_8bit); - /* * Execute tuning sequence to seek the proper bus operating * conditions for HS200 and HS400, which sends CMD21 to the device. @@@ -1172,6 -1214,7 +1172,6 @@@ static int mmc_hs200_tuning(struct mmc_card *card) { struct mmc_host *host = card->host; - int err = 0; /* * Timing should be adjusted to the HS400 target @@@ -1182,7 -1225,18 +1182,7 @@@ if (host->ops->prepare_hs400_tuning) host->ops->prepare_hs400_tuning(host, &host->ios); - if (host->ops->execute_tuning) { - mmc_host_clk_hold(host); - err = host->ops->execute_tuning(host, - MMC_SEND_TUNING_BLOCK_HS200); - mmc_host_clk_release(host); - - if (err) - pr_warn("%s: tuning execution failed\n", - mmc_hostname(host)); - } - - return err; + return mmc_execute_tuning(card); } /* @@@ -1198,6 -1252,7 +1198,6 @@@ static int mmc_init_card(struct mmc_hos int err; u32 cid[4]; u32 rocr; - u8 *ext_csd = NULL; BUG_ON(!host); WARN_ON(!host->claimed); @@@ -1262,12 -1317,6 +1262,12 @@@ memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); } + /* + * Call the optional HC's init_card function to handle quirks. 
+ */ + if (host->ops->init_card) + host->ops->init_card(host, card); + /* * For native busses: set card RCA and quit open drain mode. */ @@@ -1312,8 -1361,14 +1312,8 @@@ } if (!oldcard) { - /* - * Fetch and process extended CSD. - */ - - err = mmc_get_ext_csd(card, &ext_csd); - if (err) - goto free_card; - err = mmc_read_ext_csd(card, ext_csd); + /* Read extended CSD. */ + err = mmc_read_ext_csd(card); if (err) goto free_card; @@@ -1403,18 -1458,18 +1403,18 @@@ if (mmc_card_hs200(card)) { err = mmc_hs200_tuning(card); if (err) - goto err; + goto free_card; err = mmc_select_hs400(card); if (err) - goto err; + goto free_card; } else if (mmc_card_hs(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); if (!IS_ERR_VALUE(err)) { err = mmc_select_hs_ddr(card); if (err) - goto err; + goto free_card; } } @@@ -1490,12 -1545,15 +1490,12 @@@ if (!oldcard) host->card = card; - mmc_free_ext_csd(ext_csd); return 0; free_card: if (!oldcard) mmc_remove_card(card); err: - mmc_free_ext_csd(ext_csd); - return err; } @@@ -1758,7 -1816,7 +1758,7 @@@ static int mmc_runtime_suspend(struct m err = _mmc_suspend(host, true); if (err) - pr_err("%s: error %d doing aggessive suspend\n", + pr_err("%s: error %d doing aggressive suspend\n", mmc_hostname(host), err); return err; @@@ -1776,7 -1834,7 +1776,7 @@@ static int mmc_runtime_resume(struct mm err = _mmc_resume(host); if (err) - pr_err("%s: error %d doing aggessive resume\n", + pr_err("%s: error %d doing aggressive resume\n", mmc_hostname(host), err); return 0; @@@ -1793,46 -1851,6 +1793,46 @@@ static int mmc_power_restore(struct mmc return ret; } +int mmc_can_reset(struct mmc_card *card) +{ + u8 rst_n_function; + + rst_n_function = card->ext_csd.rst_n_function; + if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) + return 0; + return 1; +} +EXPORT_SYMBOL(mmc_can_reset); + +static int mmc_reset(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + u32 status; + + if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) + return -EOPNOTSUPP; + + if (!mmc_can_reset(card)) + return -EOPNOTSUPP; + + mmc_host_clk_hold(host); + mmc_set_clock(host, host->f_init); + + host->ops->hw_reset(host); + + /* If the reset has happened, then a status command will fail */ + if (!mmc_send_status(card, &status)) { + mmc_host_clk_release(host); + return -ENOSYS; + } + + /* Set initial state and call mmc_set_ios */ + mmc_set_initial_state(host); + mmc_host_clk_release(host); + + return mmc_power_restore(host); +} + static const struct mmc_bus_ops mmc_ops = { .remove = mmc_remove, .detect = mmc_detect, @@@ -1843,7 -1861,6 +1843,7 @@@ .power_restore = mmc_power_restore, .alive = mmc_alive, .shutdown = mmc_shutdown, + .reset = mmc_reset, }; /* diff --combined drivers/mmc/core/sd.c index ad4d43eae99d,c2cddfd99c7c..31a9ef256d06 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@@ -660,10 -660,15 +660,10 @@@ static int mmc_sd_init_uhs_card(struct * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. 
*/ - if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning && - (card->sd_bus_speed == UHS_SDR50_BUS_SPEED || - card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) { - mmc_host_clk_hold(card->host); - err = card->host->ops->execute_tuning(card->host, - MMC_SEND_TUNING_BLOCK); - mmc_host_clk_release(card->host); - } - + if (!mmc_host_is_spi(card->host) && + (card->sd_bus_speed == UHS_SDR50_BUS_SPEED || + card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) + err = mmc_execute_tuning(card); out: kfree(status); @@@ -927,12 -932,6 +927,12 @@@ static int mmc_sd_init_card(struct mmc_ memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); } + /* + * Call the optional HC's init_card function to handle quirks. + */ + if (host->ops->init_card) + host->ops->init_card(host, card); + /* * For native busses: get card RCA and quit open drain mode. */ @@@ -1157,7 -1156,7 +1157,7 @@@ static int mmc_sd_runtime_suspend(struc err = _mmc_sd_suspend(host); if (err) - pr_err("%s: error %d doing aggessive suspend\n", + pr_err("%s: error %d doing aggressive suspend\n", mmc_hostname(host), err); return err; @@@ -1175,7 -1174,7 +1175,7 @@@ static int mmc_sd_runtime_resume(struc err = _mmc_sd_resume(host); if (err) - pr_err("%s: error %d doing aggessive resume\n", + pr_err("%s: error %d doing aggressive resume\n", mmc_hostname(host), err); return 0; @@@ -1192,12 -1191,6 +1192,12 @@@ static int mmc_sd_power_restore(struct return ret; } +static int mmc_sd_reset(struct mmc_host *host) +{ + mmc_power_cycle(host, host->card->ocr); + return mmc_sd_power_restore(host); +} + static const struct mmc_bus_ops mmc_sd_ops = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, @@@ -1208,7 -1201,6 +1208,7 @@@ .power_restore = mmc_sd_power_restore, .alive = mmc_sd_alive, .shutdown = mmc_sd_suspend, + .reset = mmc_sd_reset, }; /* @@@ -1279,3 -1271,4 +1279,3 @@@ err return err; } - diff --combined drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 27f272ed502a,2788af980086..43fa16b5f510 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@@ -1105,7 -1105,7 +1105,7 @@@ int gpmi_is_ready(struct gpmi_nand_dat mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); reg = readl(r->gpmi_regs + HW_GPMI_STAT); } else - dev_err(this->dev, "unknow arch.\n"); + dev_err(this->dev, "unknown arch.\n"); return reg & mask; } @@@ -1353,156 -1353,3 +1353,156 @@@ int gpmi_read_page(struct gpmi_nand_dat set_dma_type(this, DMA_FOR_READ_ECC_PAGE); return start_dma_with_bch_irq(this, desc); } + +/** + * gpmi_copy_bits - copy bits from one memory region to another + * @dst: destination buffer + * @dst_bit_off: bit offset we're starting to write at + * @src: source buffer + * @src_bit_off: bit offset we're starting to read from + * @nbits: number of bits to copy + * + * This functions copies bits from one memory region to another, and is used by + * the GPMI driver to copy ECC sections which are not guaranteed to be byte + * aligned. + * + * src and dst should not overlap. + * + */ +void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, + const u8 *src, size_t src_bit_off, + size_t nbits) +{ + size_t i; + size_t nbytes; + u32 src_buffer = 0; + size_t bits_in_src_buffer = 0; + + if (!nbits) + return; + + /* + * Move src and dst pointers to the closest byte pointer and store bit + * offsets within a byte. + */ + src += src_bit_off / 8; + src_bit_off %= 8; + + dst += dst_bit_off / 8; + dst_bit_off %= 8; + + /* + * Initialize the src_buffer value with bits available in the first + * byte of data so that we end up with a byte aligned src pointer. 
+ */ + if (src_bit_off) { + src_buffer = src[0] >> src_bit_off; + if (nbits >= (8 - src_bit_off)) { + bits_in_src_buffer += 8 - src_bit_off; + } else { + src_buffer &= GENMASK(nbits - 1, 0); + bits_in_src_buffer += nbits; + } + nbits -= bits_in_src_buffer; + src++; + } + + /* Calculate the number of bytes that can be copied from src to dst. */ + nbytes = nbits / 8; + + /* Try to align dst to a byte boundary. */ + if (dst_bit_off) { + if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) { + src_buffer |= src[0] << bits_in_src_buffer; + bits_in_src_buffer += 8; + src++; + nbytes--; + } + + if (bits_in_src_buffer >= (8 - dst_bit_off)) { + dst[0] &= GENMASK(dst_bit_off - 1, 0); + dst[0] |= src_buffer << dst_bit_off; + src_buffer >>= (8 - dst_bit_off); + bits_in_src_buffer -= (8 - dst_bit_off); + dst_bit_off = 0; + dst++; + if (bits_in_src_buffer > 7) { + bits_in_src_buffer -= 8; + dst[0] = src_buffer; + dst++; + src_buffer >>= 8; + } + } + } + + if (!bits_in_src_buffer && !dst_bit_off) { + /* + * Both src and dst pointers are byte aligned, thus we can + * just use the optimized memcpy function. + */ + if (nbytes) + memcpy(dst, src, nbytes); + } else { + /* + * src buffer is not byte aligned, hence we have to copy each + * src byte to the src_buffer variable before extracting a byte + * to store in dst. + */ + for (i = 0; i < nbytes; i++) { + src_buffer |= src[i] << bits_in_src_buffer; + dst[i] = src_buffer; + src_buffer >>= 8; + } + } + /* Update dst and src pointers */ + dst += nbytes; + src += nbytes; + + /* + * nbits is the number of remaining bits. It should not exceed 8 as + * we've already copied as much bytes as possible. + */ + nbits %= 8; + + /* + * If there's no more bits to copy to the destination and src buffer + * was already byte aligned, then we're done. + */ + if (!nbits && !bits_in_src_buffer) + return; + + /* Copy the remaining bits to src_buffer */ + if (nbits) + src_buffer |= (*src & GENMASK(nbits - 1, 0)) << + bits_in_src_buffer; + bits_in_src_buffer += nbits; + + /* + * In case there were not enough bits to get a byte aligned dst buffer + * prepare the src_buffer variable to match the dst organization (shift + * src_buffer by dst_bit_off and retrieve the least significant bits + * from dst). + */ + if (dst_bit_off) + src_buffer = (src_buffer << dst_bit_off) | + (*dst & GENMASK(dst_bit_off - 1, 0)); + bits_in_src_buffer += dst_bit_off; + + /* + * Keep most significant bits from dst if we end up with an unaligned + * number of bits. 
+ */ + nbytes = bits_in_src_buffer / 8; + if (bits_in_src_buffer % 8) { + src_buffer |= (dst[nbytes] & + GENMASK(7, bits_in_src_buffer % 8)) << + (nbytes * 8); + nbytes++; + } + + /* Copy the remaining bytes to dst */ + for (i = 0; i < nbytes; i++) { + dst[i] = src_buffer; + src_buffer >>= 8; + } +} diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index cfe3c7695455,bb2af7207826..4c9678c8e139 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@@ -193,7 -193,6 +193,7 @@@ void bnx2x_vfop_qctor_prep(struct bnx2 /* Setup-op general parameters */ setup_p->gen_params.spcl_id = vf->sp_cl_id; setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + setup_p->gen_params.fp_hsi = vf->fp_hsi; /* Setup-op pause params: * Nothing to do, the pause thresholds are set by default to 0 which @@@ -592,7 -591,7 +592,7 @@@ int bnx2x_vf_mcast(struct bnx2x *bp, st mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), GFP_KERNEL); if (!mc) { - BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); + BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n"); return -ENOMEM; } } @@@ -2238,9 -2237,7 +2238,9 @@@ int bnx2x_vf_close(struct bnx2x *bp, st cookie.vf = vf; cookie.state = VF_ACQUIRED; - bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + if (rc) + goto op_err; } DP(BNX2X_MSG_IOV, "set state to acquired\n"); diff --combined drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 3e0f705a4311,a5a482946679..75ee9e4ced51 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@@ -487,8 -487,7 +487,8 @@@ int qlcnic_nic_del_mac(struct qlcnic_ad return err; } -int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan) +int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan, + enum qlcnic_mac_type mac_type) { struct qlcnic_mac_vlan_list *cur; struct list_head *head; @@@ -514,29 -513,10 +514,29 @@@ } cur->vlan_id = vlan; + cur->mac_type = mac_type; + list_add_tail(&cur->list, &adapter->mac_list); return 0; } +void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_vlan_list *cur; + struct list_head *head, *tmp; + + list_for_each_safe(head, tmp, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); + if (cur->mac_type != QLCNIC_MULTICAST_MAC) + continue; + + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + cur->vlan_id, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@@ -550,9 -530,8 +550,9 @@@ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; - qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan); - qlcnic_nic_add_mac(adapter, bcast_addr, vlan); + qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan, + QLCNIC_UNICAST_MAC); + qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC); if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) @@@ -561,10 -540,8 +561,10 @@@ (netdev_mc_count(netdev) > ahw->max_mc_count)) { mode = VPORT_MISS_MODE_ACCEPT_MULTI; } else if (!netdev_mc_empty(netdev)) { + qlcnic_flush_mcast_mac(adapter); netdev_for_each_mc_addr(ha, netdev) - qlcnic_nic_add_mac(adapter, ha->addr, vlan); + qlcnic_nic_add_mac(adapter, ha->addr, vlan, + QLCNIC_MULTICAST_MAC); } /* configure unicast MAC address, if 
there is not sufficient space @@@ -574,8 -551,7 +574,8 @@@ mode = VPORT_MISS_MODE_ACCEPT_ALL; } else if (!netdev_uc_empty(netdev)) { netdev_for_each_uc_addr(ha, netdev) - qlcnic_nic_add_mac(adapter, ha->addr, vlan); + qlcnic_nic_add_mac(adapter, ha->addr, vlan, + QLCNIC_UNICAST_MAC); } if (mode == VPORT_MISS_MODE_ACCEPT_ALL && @@@ -818,7 -794,7 +818,7 @@@ int qlcnic_82xx_config_intr_coalesce(st if (rv) netdev_err(adapter->netdev, - "Failed to set Rx coalescing parametrs\n"); + "Failed to set Rx coalescing parameters\n"); return rv; } diff --combined drivers/net/wireless/ath/ath10k/mac.c index d6d2f0f00caa,8698af91eaca..4ce433f1c1fb --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@@ -27,8 -27,6 +27,8 @@@ #include "htt.h" #include "txrx.h" #include "testmode.h" +#include "wmi.h" +#include "wmi-ops.h" /**********/ /* Crypto */ @@@ -37,7 -35,7 +37,7 @@@ static int ath10k_send_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, - const u8 *macaddr) + const u8 *macaddr, bool def_idx) { struct ath10k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { @@@ -58,7 -56,10 +58,7 @@@ switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: arg.key_cipher = WMI_CIPHER_AES_CCM; - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; - else - key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: arg.key_cipher = WMI_CIPHER_TKIP; @@@ -72,13 -73,7 +72,13 @@@ * Otherwise pairwise key must be set */ if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) arg.key_flags = WMI_KEY_PAIRWISE; + + if (def_idx) + arg.key_flags |= WMI_KEY_TX_USAGE; break; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* this one needs to be done in software */ + return 1; default: ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; @@@ -95,7 -90,7 +95,7 @@@ static int ath10k_install_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, - const u8 *macaddr) + const u8 *macaddr, bool def_idx) { struct ath10k *ar = arvif->ar; int ret; @@@ -104,7 -99,7 +104,7 @@@ reinit_completion(&ar->install_key_done); - ret = ath10k_send_key(arvif, key, cmd, macaddr); + ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx); if (ret) return ret; @@@ -122,7 -117,6 +122,7 @@@ static int ath10k_install_peer_wep_keys struct ath10k_peer *peer; int ret; int i; + bool def_idx; lockdep_assert_held(&ar->conf_mutex); @@@ -136,20 -130,13 +136,20 @@@ for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { if (arvif->wep_keys[i] == NULL) continue; + /* set TX_USAGE flag for default key id */ + if (arvif->def_wep_key_idx == i) + def_idx = true; + else + def_idx = false; ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, - addr); + addr, def_idx); if (ret) return ret; + spin_lock_bh(&ar->data_lock); peer->keys[i] = arvif->wep_keys[i]; + spin_unlock_bh(&ar->data_lock); } return 0; @@@ -177,9 -164,8 +177,9 @@@ static int ath10k_clear_peer_keys(struc if (peer->keys[i] == NULL) continue; + /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, peer->keys[i], - DISABLE_KEY, addr); + DISABLE_KEY, addr, false); if (ret && first_errno == 0) first_errno = ret; @@@ -187,39 -173,12 +187,39 @@@ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); + spin_lock_bh(&ar->data_lock); peer->keys[i] = NULL; + spin_unlock_bh(&ar->data_lock); } return first_errno; } +bool 
ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx) +{ + struct ath10k_peer *peer; + int i; + + lockdep_assert_held(&ar->data_lock); + + /* We don't know which vdev this peer belongs to, + * since WMI doesn't give us that information. + * + * FIXME: multi-bss needs to be handled. + */ + peer = ath10k_peer_find(ar, 0, addr); + if (!peer) + return false; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) + return true; + } + + return false; +} + static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key) { @@@ -253,8 -212,8 +253,8 @@@ if (i == ARRAY_SIZE(peer->keys)) break; - - ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); + /* key flags are not required to delete the key */ + ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false); if (ret && first_errno == 0) first_errno = ret; @@@ -279,10 -238,7 +279,10 @@@ chan_to_phymode(const struct cfg80211_c case IEEE80211_BAND_2GHZ: switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: - phymode = MODE_11G; + if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) + phymode = MODE_11B; + else + phymode = MODE_11G; break; case NL80211_CHAN_WIDTH_20: phymode = MODE_11NG_HT20; @@@ -370,9 -326,6 +370,9 @@@ static int ath10k_peer_create(struct at lockdep_assert_held(&ar->conf_mutex); + if (ar->num_peers >= ar->max_num_peers) + return -ENOBUFS; + ret = ath10k_wmi_peer_create(ar, vdev_id, addr); if (ret) { ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", @@@ -386,8 -339,9 +386,8 @@@ addr, vdev_id, ret); return ret; } - spin_lock_bh(&ar->data_lock); + ar->num_peers++; - spin_unlock_bh(&ar->data_lock); return 0; } @@@ -437,11 -391,15 +437,11 @@@ static int ath10k_mac_set_kickout(struc return 0; } -static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) +static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) { struct ath10k *ar = arvif->ar; u32 vdev_param; - if (value != 0xFFFFFFFF) - value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, - ATH10K_RTS_MAX); - vdev_param = ar->wmi.vdev_param->rts_threshold; return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } @@@ -474,7 -432,9 +474,7 @@@ static int ath10k_peer_delete(struct at if (ret) return ret; - spin_lock_bh(&ar->data_lock); ar->num_peers--; - spin_unlock_bh(&ar->data_lock); return 0; } @@@ -511,63 -471,20 +511,63 @@@ static void ath10k_peer_cleanup_all(str list_del(&peer->list); kfree(peer); } - ar->num_peers = 0; spin_unlock_bh(&ar->data_lock); + + ar->num_peers = 0; + ar->num_stations = 0; } /************************/ /* Interface management */ /************************/ +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + if (!arvif->beacon) + return; + + if (!arvif->beacon_buf) + dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, + arvif->beacon->len, DMA_TO_DEVICE); + + if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && + arvif->beacon_state != ATH10K_BEACON_SENT)) + return; + + dev_kfree_skb_any(arvif->beacon); + + arvif->beacon = NULL; + arvif->beacon_state = ATH10K_BEACON_SCHEDULED; +} + +static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + ath10k_mac_vif_beacon_free(arvif); + + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + 
arvif->beacon_buf = NULL; + } +} + static inline int ath10k_vdev_setup_sync(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + ret = wait_for_completion_timeout(&ar->vdev_setup_done, ATH10K_VDEV_SETUP_TIMEOUT_HZ); if (ret == 0) @@@ -600,8 -517,6 +600,8 @@@ static int ath10k_monitor_vdev_start(st arg.channel.max_reg_power = channel->max_reg_power * 2; arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", @@@ -649,8 -564,6 +649,8 @@@ static int ath10k_monitor_vdev_stop(str ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", @@@ -677,9 -590,9 +677,9 @@@ static int ath10k_monitor_vdev_create(s return -ENOMEM; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - ar->monitor_vdev_id = bit - 1; + ar->monitor_vdev_id = bit; ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, @@@ -690,7 -603,7 +690,7 @@@ return ret; } - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); @@@ -710,7 -623,7 +710,7 @@@ static int ath10k_monitor_vdev_delete(s return ret; } - ar->free_vdev_map |= 1 << ar->monitor_vdev_id; + ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); @@@ -981,143 -894,6 +981,143 @@@ static int ath10k_vdev_stop(struct ath1 return ret; } +static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, + struct sk_buff *bcn) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_mgmt *mgmt; + const u8 *p2p_ie; + int ret; + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + return 0; + + if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + return 0; + + mgmt = (void *)bcn->data; + p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + mgmt->u.beacon.variable, + bcn->len - (mgmt->u.beacon.variable - + bcn->data)); + if (!p2p_ie) + return -ENOENT; + + ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); + if (ret) { + ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, + u8 oui_type, size_t ie_offset) +{ + size_t len; + const u8 *next; + const u8 *end; + u8 *ie; + + if (WARN_ON(skb->len < ie_offset)) + return -EINVAL; + + ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, + skb->data + ie_offset, + skb->len - ie_offset); + if (!ie) + return -ENOENT; + + len = ie[1] + 2; + end = skb->data + skb->len; + next = ie + len; + + if (WARN_ON(next > end)) + return -EINVAL; + + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); + + return 0; +} + +static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_mutable_offsets offs = {}; + struct sk_buff *bcn; + int ret; + + if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) + return 0; + + bcn = 
ieee80211_beacon_get_template(hw, vif, &offs); + if (!bcn) { + ath10k_warn(ar, "failed to get beacon template from mac80211\n"); + return -EPERM; + } + + ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); + if (ret) { + ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); + kfree_skb(bcn); + return ret; + } + + /* P2P IE is inserted by firmware automatically (as configured above) + * so remove it from the base beacon template to avoid duplicate P2P + * IEs in beacon frames. + */ + ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + offsetof(struct ieee80211_mgmt, + u.beacon.variable)); + + ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, + 0, NULL, 0); + kfree_skb(bcn); + + if (ret) { + ath10k_warn(ar, "failed to submit beacon template command: %d\n", + ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct sk_buff *prb; + int ret; + + if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) + return 0; + + prb = ieee80211_proberesp_get(hw, vif); + if (!prb) { + ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); + return -EPERM; + } + + ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); + kfree_skb(prb); + + if (ret) { + ath10k_warn(ar, "failed to submit probe resp template command: %d\n", + ret); + return ret; + } + + return 0; +} + static void ath10k_control_beaconing(struct ath10k_vif *arvif, struct ieee80211_bss_conf *info) { @@@ -1133,7 -909,15 +1133,7 @@@ arvif->is_up = false; spin_lock_bh(&arvif->ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - - arvif->beacon = NULL; - arvif->beacon_sent = false; - } + ath10k_mac_vif_beacon_free(arvif); spin_unlock_bh(&arvif->ar->data_lock); return; @@@ -1182,6 -966,14 +1182,6 @@@ static void ath10k_control_ibss(struct if (is_zero_ether_addr(arvif->bssid)) return; - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, - arvif->bssid); - if (ret) { - ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", - arvif->bssid, arvif->vdev_id, ret); - return; - } - memset(arvif->bssid, 0, ETH_ALEN); return; @@@ -1202,85 -994,28 +1202,85 @@@ arvif->vdev_id, ret); } -/* - * Review this when mac80211 gains per-interface powersave support. 
- */ +static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 param; + u32 value; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; + else + value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; + + param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); + if (ret) { + ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", + value, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + u32 param; + u32 value; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->u.sta.uapsd) + value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; + else + value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; + + param = WMI_STA_PS_PARAM_PSPOLL_COUNT; + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param, value); + if (ret) { + ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", + value, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->vif; struct ieee80211_conf *conf = &ar->hw->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; int ret; + int ps_timeout; lockdep_assert_held(&arvif->ar->conf_mutex); if (arvif->vif->type != NL80211_IFTYPE_STATION) return 0; - if (conf->flags & IEEE80211_CONF_PS) { + if (vif->bss_conf.ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; + ps_timeout = conf->dynamic_ps_timeout; + if (ps_timeout == 0) { + /* Firmware doesn't like 0 */ + ps_timeout = ieee80211_tu_to_usec( + vif->bss_conf.beacon_int) / 1000; + } + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, - conf->dynamic_ps_timeout); + ps_timeout); if (ret) { ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", arvif->vdev_id, ret); @@@ -1303,81 -1038,55 +1303,81 @@@ return 0; } +static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + struct wmi_sta_keepalive_arg arg = {}; + int ret; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) + return 0; + + /* Some firmware revisions have a bug and ignore the `enabled` field. + * Instead use the interval to disable the keepalive. + */ + arg.vdev_id = arvif->vdev_id; + arg.enabled = 1; + arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; + arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; + + ret = ath10k_wmi_sta_keepalive(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + /**********************/ /* Station management */ /**********************/ +static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, + struct ieee80211_vif *vif) +{ + /* Some firmware revisions have unstable STA powersave when listen + * interval is set too high (e.g. 5). The symptoms are firmware doesn't + * generate NullFunc frames properly even if buffered frames have been + * indicated in Beacon TIM. Firmware would seldom wake up to pull + * buffered frames. Often pinging the device from AP would simply fail. + * + * As a workaround set it to 1. 
+ */ + if (vif->type == NL80211_IFTYPE_STATION) + return 1; + + return ar->hw->conf.listen_interval; +} + static void ath10k_peer_assoc_h_basic(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + lockdep_assert_held(&ar->conf_mutex); ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = sta->aid; arg->peer_flags |= WMI_PEER_AUTH; - - if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - /* - * Seems FW have problems with Power Save in STA - * mode when we setup this parameter to high (eg. 5). - * Often we see that FW don't send NULL (with clean P flags) - * frame even there is info about buffered frames in beacons. - * Sometimes we have to wait more than 10 seconds before FW - * will wakeup. Often sending one ping from AP to our device - * just fail (more than 50%). - * - * Seems setting this FW parameter to 1 couse FW - * will check every beacon and will wakup immediately - * after detection buffered data. - */ - arg->peer_listen_intval = 1; - else - arg->peer_listen_intval = ar->hw->conf.listen_interval; - + arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; - - /* - * The assoc capabilities are available only in managed mode. - */ - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) - arg->peer_caps = bss_conf->assoc_capability; + arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct wmi_peer_assoc_complete_arg *arg) { - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_bss *bss; const u8 *rsnie = NULL; @@@ -1603,10 -1312,6 +1603,10 @@@ static void ath10k_peer_assoc_h_vht(str return; arg->peer_flags |= WMI_PEER_VHT; + + if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) + arg->peer_flags |= WMI_PEER_VHT_2G; + arg->peer_vht_caps = vht_cap->cap; ampdu_factor = (vht_cap->cap & @@@ -1638,12 -1343,11 +1638,12 @@@ } static void ath10k_peer_assoc_h_qos(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) @@@ -1655,29 -1359,16 +1655,29 @@@ } break; case WMI_VDEV_TYPE_STA: - if (bss_conf->qos) + if (vif->bss_conf.qos) + arg->peer_flags |= WMI_PEER_QOS; + break; + case WMI_VDEV_TYPE_IBSS: + if (sta->wme) arg->peer_flags |= WMI_PEER_QOS; break; default: break; } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", + sta->addr, !!(arg->peer_flags & WMI_PEER_QOS)); +} + +static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta) +{ + /* First 4 rates in ath10k_rates are CCK (11b) rates. 
*/ + return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4; } static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { @@@ -1685,20 -1376,13 +1685,20 @@@ switch (ar->hw->conf.chandef.chan->band) { case IEEE80211_BAND_2GHZ: - if (sta->ht_cap.ht_supported) { + if (sta->vht_cap.vht_supported) { + if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11AC_VHT40; + else + phymode = MODE_11AC_VHT20; + } else if (sta->ht_cap.ht_supported) { if (sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; - } else { + } else if (ath10k_mac_sta_has_11g_rates(sta)) { phymode = MODE_11G; + } else { + phymode = MODE_11B; } break; @@@ -1735,21 -1419,22 +1735,21 @@@ } static int ath10k_peer_assoc_prepare(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { lockdep_assert_held(&ar->conf_mutex); memset(arg, 0, sizeof(*arg)); - ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_crypto(ar, arvif, arg); + ath10k_peer_assoc_h_basic(ar, vif, sta, arg); + ath10k_peer_assoc_h_crypto(ar, vif, arg); ath10k_peer_assoc_h_rates(ar, sta, arg); ath10k_peer_assoc_h_ht(ar, sta, arg); ath10k_peer_assoc_h_vht(ar, sta, arg); - ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg); + ath10k_peer_assoc_h_qos(ar, vif, sta, arg); + ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); return 0; } @@@ -1795,9 -1480,6 +1795,9 @@@ static void ath10k_bss_assoc(struct iee lockdep_assert_held(&ar->conf_mutex); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", + arvif->vdev_id, arvif->bssid, arvif->aid); + rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); @@@ -1812,7 -1494,8 +1812,7 @@@ * before calling ath10k_setup_peer_smps() which might sleep. */ ht_cap = ap_sta->ht_cap; - ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, - bss_conf, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); @@@ -1840,8 -1523,6 +1840,8 @@@ "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + WARN_ON(arvif->is_up); + arvif->aid = bss_conf->aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); @@@ -1855,6 -1536,9 +1855,6 @@@ arvif->is_up = true; } -/* - * FIXME: flush TIDs - */ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@@ -1864,31 -1548,45 +1864,31 @@@ lockdep_assert_held(&ar->conf_mutex); - /* - * For some reason, calling VDEV-DOWN before VDEV-STOP - * makes the FW to send frames via HTT after disassociation. - * No idea why this happens, even though VDEV-DOWN is supposed - * to be analogous to link down, so just stop the VDEV. - */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", - arvif->vdev_id); - - /* FIXME: check return value */ - ret = ath10k_vdev_stop(arvif); - - /* - * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and - * report beacons from previously associated network through HTT. - * This in turn would spam mac80211 WARN_ON if we bring down all - * interfaces as it expects there is no rx when no interface is - * running. 
- */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", + arvif->vdev_id, arvif->bssid); - /* FIXME: why don't we print error if wmi call fails? */ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_warn(ar, "faield to down vdev %i: %d\n", + arvif->vdev_id, ret); - arvif->def_wep_key_idx = 0; + arvif->def_wep_key_idx = -1; - arvif->is_started = false; arvif->is_up = false; } -static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, - struct ieee80211_sta *sta, bool reassoc) +static int ath10k_station_assoc(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@@ -1903,54 -1601,43 +1903,54 @@@ return ret; } - ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); - if (ret) { - ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", - arvif->vdev_id, ret); - return ret; - } - - if (!sta->wme && !reassoc) { - arvif->num_legacy_stations++; - ret = ath10k_recalc_rtscts_prot(arvif); + /* Re-assoc is run only to update supported rates for given station. It + * doesn't make much sense to reconfigure the peer completely. + */ + if (!reassoc) { + ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, + &sta->ht_cap); if (ret) { - ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } - } - ret = ath10k_install_peer_wep_keys(arvif, sta->addr); - if (ret) { - ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", - arvif->vdev_id, ret); - return ret; - } + ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); + if (ret) { + ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); + return ret; + } - ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); - if (ret) { - ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", - sta->addr, arvif->vdev_id, ret); - return ret; + if (!sta->wme) { + arvif->num_legacy_stations++; + ret = ath10k_recalc_rtscts_prot(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + /* Plumb cached keys only for static WEP */ + if (arvif->def_wep_key_idx != -1) { + ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + if (ret) { + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } } return ret; } -static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, +static int ath10k_station_disassoc(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@@ -2042,7 -1729,6 +2042,7 @@@ static int ath10k_update_channel_list(s ch->passive = passive; ch->freq = channel->center_freq; + ch->band_center_freq1 = channel->center_freq; ch->min_power = 0; ch->max_power = channel->max_power * 2; ch->max_reg_power = channel->max_reg_power * 2; 
@@@ -2204,13 -1890,75 +2204,13 @@@ static void ath10k_tx_h_nwifi(struct ie * used only for CQM purposes (e.g. hostapd station keepalive ping) so * it is safe to downgrade to NullFunc. */ + hdr = (void *)skb->data; if (ieee80211_is_qos_nullfunc(hdr->frame_control)) { hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; } } -static void ath10k_tx_wep_key_work(struct work_struct *work) -{ - struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, - wep_key_work); - struct ath10k *ar = arvif->ar; - int ret, keyidx = arvif->def_wep_key_newidx; - - mutex_lock(&arvif->ar->conf_mutex); - - if (arvif->ar->state != ATH10K_STATE_ON) - goto unlock; - - if (arvif->def_wep_key_idx == keyidx) - goto unlock; - - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", - arvif->vdev_id, keyidx); - - ret = ath10k_wmi_vdev_set_param(arvif->ar, - arvif->vdev_id, - arvif->ar->wmi.vdev_param->def_keyid, - keyidx); - if (ret) { - ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", - arvif->vdev_id, - ret); - goto unlock; - } - - arvif->def_wep_key_idx = keyidx; - -unlock: - mutex_unlock(&arvif->ar->conf_mutex); -} - -static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif, - struct ieee80211_key_conf *key, - struct sk_buff *skb) -{ - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - struct ath10k *ar = arvif->ar; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - - if (!ieee80211_has_protected(hdr->frame_control)) - return; - - if (!key) - return; - - if (key->cipher != WLAN_CIPHER_SUITE_WEP40 && - key->cipher != WLAN_CIPHER_SUITE_WEP104) - return; - - if (key->keyidx == arvif->def_wep_key_idx) - return; - - /* FIXME: Most likely a few frames will be TXed with an old key. Simply - * queueing frames until key index is updated is not an option because - * sk_buff may need more processing to be done, e.g. offchannel */ - arvif->def_wep_key_newidx = key->keyidx; - ieee80211_queue_work(ar->hw, &arvif->wep_key_work); -} - static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct ieee80211_vif *vif, struct sk_buff *skb) @@@ -2235,18 -1983,6 +2235,18 @@@ } } +static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) +{ + /* FIXME: Not really sure since when the behaviour changed. At some + * point new firmware stopped requiring creation of peer entries for + * offchannel tx (and actually creating them causes issues with wmi-htc + * tx credit replenishment and reliability). Assuming it's at least 3.4 + * because that's when the `freq` was introduced to TX_FRM HTT command. 
+ */ + return !(ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4); +} + static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@@ -2362,7 -2098,7 +2362,7 @@@ void ath10k_offchan_tx_work(struct work ret = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); - if (ret <= 0) + if (ret == 0) ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", skb); @@@ -2422,11 -2158,10 +2422,11 @@@ void __ath10k_scan_finish(struct ath10 case ATH10K_SCAN_IDLE: break; case ATH10K_SCAN_RUNNING: - case ATH10K_SCAN_ABORTING: if (ar->scan.is_roc) ieee80211_remain_on_channel_expired(ar->hw); - else + /* fall through */ + case ATH10K_SCAN_ABORTING: + if (!ar->scan.is_roc) ieee80211_scan_completed(ar->hw, (ar->scan.state == ATH10K_SCAN_ABORTING)); @@@ -2571,6 -2306,7 +2571,6 @@@ static void ath10k_tx(struct ieee80211_ struct ath10k *ar = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; - struct ieee80211_key_conf *key = info->control.hw_key; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* We should disable CCK RATE due to P2P */ @@@ -2584,34 -2320,30 +2584,34 @@@ /* it makes no sense to process injected frames like that */ if (vif && vif->type != NL80211_IFTYPE_MONITOR) { ath10k_tx_h_nwifi(hw, skb); - ath10k_tx_h_update_wep_key(vif, key, skb); ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); ath10k_tx_h_seq_no(vif, skb); } if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.is_offchan = true; + ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", - skb); + if (ath10k_mac_need_offchan_tx_work(ar)) { + ATH10K_SKB_CB(skb)->htt.freq = 0; + ATH10K_SKB_CB(skb)->htt.is_offchan = true; - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return; + } } ath10k_tx_htt(ar, skb); } /* Must not be called with conf_mutex held as workers can use that also. */ -static void ath10k_drain_tx(struct ath10k *ar) +void ath10k_drain_tx(struct ath10k *ar) { /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); @@@ -2644,8 -2376,16 +2644,8 @@@ void ath10k_halt(struct ath10k *ar ath10k_hif_power_down(ar); spin_lock_bh(&ar->data_lock); - list_for_each_entry(arvif, &ar->arvifs, list) { - if (!arvif->beacon) - continue; - - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; - } + list_for_each_entry(arvif, &ar->arvifs, list) + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); } @@@ -2668,28 -2408,12 +2668,28 @@@ static int ath10k_get_antenna(struct ie return 0; } +static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) +{ + /* It is not clear that allowing gaps in chainmask + * is helpful. Probably it will not do what user + * is hoping for, so warn in that case. + */ + if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) + return; + + ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", + dbg, cm); +} + static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) { int ret; lockdep_assert_held(&ar->conf_mutex); + ath10k_check_chain_mask(ar, tx_ant, "tx"); + ath10k_check_chain_mask(ar, rx_ant, "rx"); + ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; @@@ -2953,68 -2677,12 +2953,68 @@@ static void ath10k_config_chan(struct a ath10k_monitor_recalc(ar); } +static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) +{ + int ret; + u32 param; + + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); + + param = ar->wmi.pdev_param->txpower_limit2g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", + txpower, ret); + return ret; + } + + param = ar->wmi.pdev_param->txpower_limit5g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_txpower_recalc(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret, txpower = -1; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + WARN_ON(arvif->txpower < 0); + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (WARN_ON(txpower == -1)) + return -EINVAL; + + ret = ath10k_mac_txpower_setup(ar, txpower); + if (ret) { + ath10k_warn(ar, "failed to setup tx power %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + static int ath10k_config(struct ieee80211_hw *hw, u32 changed) { struct ath10k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; - u32 param; mutex_lock(&ar->conf_mutex); @@@ -3038,6 -2706,25 +3038,6 @@@ } } - if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n", - hw->conf.power_level); - - param = ar->wmi.pdev_param->txpower_limit2g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", - hw->conf.power_level, ret); - - param = ar->wmi.pdev_param->txpower_limit5g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", - hw->conf.power_level, ret); - } - if (changed & IEEE80211_CONF_CHANGE_PS) ath10k_config_ps(ar); @@@ -3052,17 -2739,6 +3052,17 @@@ return ret; } +static u32 get_nss_from_chainmask(u16 chain_mask) +{ + if ((chain_mask & 0x15) == 0x15) + return 4; + else if ((chain_mask & 0x7) == 0x7) + return 3; + else if ((chain_mask & 0x3) == 0x3) + return 2; + return 1; +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@@ -3081,8 -2757,6 +3081,8 @@@ static int ath10k_add_interface(struct int bit; u32 vdev_param; + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + mutex_lock(&ar->conf_mutex); memset(arvif, 0, sizeof(*arvif)); @@@ -3090,6 -2764,7 +3090,6 @@@ arvif->ar = ar; arvif->vif = vif; - INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); INIT_LIST_HEAD(&arvif->list); if (ar->free_vdev_map == 0) { @@@ -3097,19 -2772,15 +3097,19 @@@ ret = -EBUSY; goto err; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - arvif->vdev_id = bit - 1; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", + bit, ar->free_vdev_map); - if (ar->p2p) 
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + arvif->vdev_id = bit; + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { + case NL80211_IFTYPE_P2P_DEVICE: + arvif->vdev_type = WMI_VDEV_TYPE_STA; + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; @@@ -3133,39 -2804,8 +3133,39 @@@ break; } - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", - arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); + /* Some firmware revisions don't wait for beacon tx completion before + * sending another SWBA event. This could lead to hardware using old + * (freed) beacon data in some cases, e.g. tx credit starvation + * combined with missed TBTT. This is very very rare. + * + * On non-IOMMU-enabled hosts this could be a possible security issue + * because hw could beacon some random data on the air. On + * IOMMU-enabled hosts DMAR faults would occur in most cases and target + * device would crash. + * + * Since there are no beacon tx completions (implicit nor explicit) + * propagated to host the only workaround for this is to allocate a + * DMA-coherent buffer for a lifetime of a vif and use it for all + * beacon tx commands. Worst case for this approach is some beacons may + * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. + */ + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_AP) { + arvif->beacon_buf = dma_zalloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); + if (!arvif->beacon_buf) { + ret = -ENOMEM; + ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", + ret); + goto err; + } + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + arvif->beacon_buf ? "single-buf" : "per-skb"); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); @@@ -3175,21 -2815,18 +3175,21 @@@ goto err; } - ar->free_vdev_map &= ~(1 << arvif->vdev_id); + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); list_add(&arvif->list, &ar->arvifs); - vdev_param = ar->wmi.vdev_param->def_keyid; - ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, - arvif->def_wep_key_idx); + /* It makes no sense to have firmware do keepalives. mac80211 already + * takes care of this with idle connection polling. 
+ */ + ret = ath10k_mac_vif_disable_keepalive(arvif); if (ret) { - ath10k_warn(ar, "failed to set vdev %i default key id: %d\n", + ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } + arvif->def_wep_key_idx = -1; + vdev_param = ar->wmi.vdev_param->tx_encap_type; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ATH10K_HW_TXRX_NATIVE_WIFI); @@@ -3200,20 -2837,6 +3200,20 @@@ goto err_vdev_delete; } + if (ar->cfg_tx_chainmask) { + u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + + vdev_param = ar->wmi.vdev_param->nss; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + nss); + if (ret) { + ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", + arvif->vdev_id, ar->cfg_tx_chainmask, nss, + ret); + goto err_vdev_delete; + } + } + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { @@@ -3241,16 -2864,22 +3241,16 @@@ goto err_peer_delete; } - param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; - value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; - ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, - param, value); + ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); if (ret) { - ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n", + ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } - param = WMI_STA_PS_PARAM_PSPOLL_COUNT; - value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; - ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, - param, value); + ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); if (ret) { - ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n", + ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@@ -3270,13 -2899,6 +3270,13 @@@ goto err_peer_delete; } + arvif->txpower = vif->bss_conf.txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + goto err_peer_delete; + } + mutex_unlock(&ar->conf_mutex); return 0; @@@ -3286,16 -2908,10 +3286,16 @@@ err_peer_delete err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); err: + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } + mutex_unlock(&ar->conf_mutex); return ret; @@@ -3310,8 -2926,17 +3310,8 @@@ static void ath10k_remove_interface(str mutex_lock(&ar->conf_mutex); - cancel_work_sync(&arvif->wep_key_work); - spin_lock_bh(&ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; - } - + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); ret = ath10k_spectral_vif_stop(arvif); @@@ -3319,7 -2944,7 +3319,7 @@@ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", arvif->vdev_id, ret); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { @@@ -3418,21 -3043,9 +3418,21 @@@ static void ath10k_bss_info_changed(str if (ret) ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", arvif->vdev_id, ret); + + ret = ath10k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath10k_warn(ar, 
"failed to update beacon template: %d\n", + ret); + } + + if (changed & BSS_CHANGED_AP_PROBE_RESP) { + ret = ath10k_mac_setup_prb_tmpl(arvif); + if (ret) + ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", + arvif->vdev_id, ret); } - if (changed & BSS_CHANGED_BEACON_INFO) { + if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { arvif->dtim_period = info->dtim_period; ath10k_dbg(ar, ATH10K_DBG_MAC, @@@ -3455,8 -3068,54 +3455,8 @@@ arvif->u.ap.hidden_ssid = info->hidden_ssid; } - /* - * Firmware manages AP self-peer internally so make sure to not create - * it in driver. Otherwise AP self-peer deletion may timeout later. - */ - if (changed & BSS_CHANGED_BSSID && - vif->type != NL80211_IFTYPE_AP) { - if (!is_zero_ether_addr(info->bssid)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d create peer %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_peer_create(ar, arvif->vdev_id, - info->bssid); - if (ret) - ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n", - info->bssid, arvif->vdev_id, ret); - - if (vif->type == NL80211_IFTYPE_STATION) { - /* - * this is never erased as we it for crypto key - * clearing; this is FW requirement - */ - ether_addr_copy(arvif->bssid, info->bssid); - - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d start %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_vdev_start(arvif); - if (ret) { - ath10k_warn(ar, "failed to start vdev %i: %d\n", - arvif->vdev_id, ret); - goto exit; - } - - arvif->is_started = true; - } - - /* - * Mac80211 does not keep IBSS bssid when leaving IBSS, - * so driver need to store it. It is needed when leaving - * IBSS in order to remove BSSID peer. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) - memcpy(arvif->bssid, info->bssid, - ETH_ALEN); - } - } + if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); if (changed & BSS_CHANGED_BEACON_ENABLED) ath10k_control_beaconing(arvif, info); @@@ -3518,28 -3177,10 +3518,28 @@@ ath10k_monitor_stop(ar); ath10k_bss_assoc(hw, vif, info); ath10k_monitor_recalc(ar); + } else { + ath10k_bss_disassoc(hw, vif); } } -exit: + if (changed & BSS_CHANGED_TXPOWER) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + } + + if (changed & BSS_CHANGED_PS) { + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) + ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", + arvif->vdev_id, ret); + } + mutex_unlock(&ar->conf_mutex); } @@@ -3625,10 -3266,9 +3625,10 @@@ static void ath10k_cancel_hw_scan(struc struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); } static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, @@@ -3679,7 -3319,6 +3679,7 @@@ static int ath10k_set_key(struct ieee80 const u8 *peer_addr; bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104; + bool def_idx = false; int ret = 0; if (key->keyidx > WMI_MAX_KEY_INDEX) @@@ -3725,14 -3364,7 +3725,14 @@@ ath10k_clear_vdev_key(arvif, key); } - ret = ath10k_install_key(arvif, key, cmd, peer_addr); + /* set TX_USAGE flag for all the keys incase of dot1x-WEP. 
For + * static WEP, do not set this flag for the keys whose key id + * is greater than default key id. + */ + if (arvif->def_wep_key_idx == -1) + def_idx = true; + + ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx); if (ret) { ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); @@@ -3757,39 -3389,6 +3757,39 @@@ exit return ret; } +static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int keyidx) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + int ret; + + mutex_lock(&arvif->ar->conf_mutex); + + if (arvif->ar->state != ATH10K_STATE_ON) + goto unlock; + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", + arvif->vdev_id, keyidx); + + ret = ath10k_wmi_vdev_set_param(arvif->ar, + arvif->vdev_id, + arvif->ar->wmi.vdev_param->def_keyid, + keyidx); + + if (ret) { + ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", + arvif->vdev_id, + ret); + goto unlock; + } + + arvif->def_wep_key_idx = keyidx; +unlock: + mutex_unlock(&arvif->ar->conf_mutex); +} + static void ath10k_sta_rc_update_wk(struct work_struct *wk) { struct ath10k *ar; @@@ -3850,12 -3449,11 +3850,12 @@@ sta->addr, smps, err); } - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || + changed & IEEE80211_RC_NSS_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", sta->addr); - err = ath10k_station_assoc(ar, arvif, sta, true); + err = ath10k_station_assoc(ar, arvif->vif, sta, true); if (err) ath10k_warn(ar, "failed to reassociate station: %pM\n", sta->addr); @@@ -3864,37 -3462,6 +3864,37 @@@ mutex_unlock(&ar->conf_mutex); } +static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return 0; + + if (ar->num_stations >= ar->max_num_stations) + return -ENOBUFS; + + ar->num_stations++; + + return 0; +} + +static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return; + + ar->num_stations--; +} + static int ath10k_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@@ -3904,6 -3471,7 +3904,6 @@@ struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - int max_num_peers; int ret = 0; if (old_state == IEEE80211_STA_NOTEXIST && @@@ -3921,46 -3489,31 +3921,46 @@@ mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE && - vif->type != NL80211_IFTYPE_STATION) { + new_state == IEEE80211_STA_NONE) { /* * New station addition. 
*/ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1; - else - max_num_peers = TARGET_NUM_PEERS; + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", + arvif->vdev_id, sta->addr, + ar->num_stations + 1, ar->max_num_stations, + ar->num_peers + 1, ar->max_num_peers); - if (ar->num_peers >= max_num_peers) { - ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n", - ar->num_peers, max_num_peers); - ret = -ENOBUFS; + ret = ath10k_mac_inc_num_stations(arvif); + if (ret) { + ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", + ar->max_num_stations); goto exit; } - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d peer create %pM (new sta) num_peers %d\n", - arvif->vdev_id, sta->addr, ar->num_peers); - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); - if (ret) + if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(arvif->is_started); + + ret = ath10k_vdev_start(arvif); + if (ret) { + ath10k_warn(ar, "failed to start vdev %i: %d\n", + arvif->vdev_id, ret); + WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id, + sta->addr)); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + arvif->is_started = true; + } } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* @@@ -3969,24 -3522,13 +3969,24 @@@ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer delete %pM (sta gone)\n", arvif->vdev_id, sta->addr); + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(!arvif->is_started); + + ret = ath10k_vdev_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_started = false; + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", sta->addr, arvif->vdev_id, ret); - if (vif->type == NL80211_IFTYPE_STATION) - ath10k_bss_disassoc(hw, vif); + ath10k_mac_dec_num_stations(arvif); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || @@@ -3997,7 -3539,7 +3997,7 @@@ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", sta->addr); - ret = ath10k_station_assoc(ar, arvif, sta, false); + ret = ath10k_station_assoc(ar, vif, sta, false); if (ret) ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@@ -4011,7 -3553,7 +4011,7 @@@ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", sta->addr); - ret = ath10k_station_disassoc(ar, arvif, sta); + ret = ath10k_station_disassoc(ar, vif, sta); if (ret) ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@@ -4025,8 -3567,6 +4025,8 @@@ static int ath10k_conf_tx_uapsd(struct u16 ac, bool enable) { struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct wmi_sta_uapsd_auto_trig_arg arg = {}; + u32 prio = 0, acc = 0; u32 value = 0; int ret = 0; @@@ -4039,26 -3579,18 +4039,26 @@@ case IEEE80211_AC_VO: value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; + prio = 7; + acc = 3; break; case IEEE80211_AC_VI: value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; + prio = 5; + acc = 2; break; case IEEE80211_AC_BE: value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN 
| WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; + prio = 2; + acc = 1; break; case IEEE80211_AC_BK: value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; + prio = 0; + acc = 0; break; } @@@ -4086,43 -3618,6 +4086,43 @@@ if (ret) ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); + ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { + /* Only userspace can make an educated decision when to send + * trigger frame. The following effectively disables u-UAPSD + * autotrigger in firmware (which is enabled by default + * provided the autotrigger service is available). + */ + + arg.wmm_ac = acc; + arg.user_priority = prio; + arg.service_interval = 0; + arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; + arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; + + ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, + arvif->bssid, &arg, 1); + if (ret) { + ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", + ret); + return ret; + } + } + exit: return ret; } @@@ -4132,7 -3627,6 +4132,7 @@@ static int ath10k_conf_tx(struct ieee80 const struct ieee80211_tx_queue_params *params) { struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; @@@ -4140,16 -3634,16 +4140,16 @@@ switch (ac) { case IEEE80211_AC_VO: - p = &ar->wmm_params.ac_vo; + p = &arvif->wmm_params.ac_vo; break; case IEEE80211_AC_VI: - p = &ar->wmm_params.ac_vi; + p = &arvif->wmm_params.ac_vi; break; case IEEE80211_AC_BE: - p = &ar->wmm_params.ac_be; + p = &arvif->wmm_params.ac_be; break; case IEEE80211_AC_BK: - p = &ar->wmm_params.ac_bk; + p = &arvif->wmm_params.ac_bk; break; } @@@ -4169,23 -3663,11 +4169,23 @@@ */ p->txop = params->txop * 32; - /* FIXME: FW accepts wmm params per hw, not per vif */ - ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); - if (ret) { - ath10k_warn(ar, "failed to set wmm params: %d\n", ret); - goto exit; + if (ar->wmi.ops->gen_vdev_wmm_conf) { + ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, + &arvif->wmm_params); + if (ret) { + ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", + arvif->vdev_id, ret); + goto exit; + } + } else { + /* This won't work well with multi-interface cases but it's + * better than nothing. 
+ */ + ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); + if (ret) { + ath10k_warn(ar, "failed to set wmm params: %d\n", ret); + goto exit; + } } ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); @@@ -4235,8 -3717,6 +4235,8 @@@ static int ath10k_remain_on_channel(str if (ret) goto exit; + duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC); + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@@ -4281,11 -3761,10 +4281,11 @@@ static int ath10k_cancel_remain_on_chan struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); + return 0; } @@@ -4317,6 -3796,29 +4317,6 @@@ static int ath10k_set_rts_threshold(str return ret; } -static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) -{ - struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif; - int ret = 0; - - mutex_lock(&ar->conf_mutex); - list_for_each_entry(arvif, &ar->arvifs, list) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", - arvif->vdev_id, value); - - ret = ath10k_mac_set_rts(arvif, value); - if (ret) { - ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n", - arvif->vdev_id, ret); - break; - } - } - mutex_unlock(&ar->conf_mutex); - - return ret; -} - static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { @@@ -4341,9 -3843,7 +4341,9 @@@ empty = (ar->htt.num_pending_tx == 0); spin_unlock_bh(&ar->htt.tx_lock); - skip = (ar->state == ATH10K_STATE_WEDGED); + skip = (ar->state == ATH10K_STATE_WEDGED) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, + &ar->dev_flags); (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); @@@ -4429,14 -3929,10 +4429,14 @@@ exit } #endif -static void ath10k_restart_complete(struct ieee80211_hw *hw) +static void ath10k_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) { struct ath10k *ar = hw->priv; + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + mutex_lock(&ar->conf_mutex); /* If device failed to restart it will be in a different state, e.g. 
@@@ -4444,7 -3940,6 +4444,7 @@@ if (ar->state == ATH10K_STATE_RESTARTED) { ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; + ieee80211_wake_queues(ar->hw); } mutex_unlock(&ar->conf_mutex); @@@ -4480,9 -3975,6 +4480,9 @@@ static int ath10k_get_survey(struct iee survey->channel = &sband->channels[idx]; + if (ar->rx_channel == survey->channel) + survey->filled |= SURVEY_INFO_IN_USE; + exit: mutex_unlock(&ar->conf_mutex); return ret; @@@ -4530,10 -4022,6 +4530,10 @@@ ath10k_default_bitrate_mask(struct ath1 u32 legacy = 0x00ff; u8 ht = 0xff, i; u16 vht = 0x3ff; + u16 nrf = ar->num_rf_chains; + + if (ar->cfg_tx_chainmask) + nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask); switch (band) { case IEEE80211_BAND_2GHZ: @@@ -4549,11 -4037,11 +4549,11 @@@ if (mask->control[band].legacy != legacy) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].ht_mcs[i] != ht) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].vht_mcs[i] != vht) return false; @@@ -4804,9 -4292,6 +4804,9 @@@ static int ath10k_set_bitrate_mask(stru u8 fixed_nss = ar->num_rf_chains; u8 force_sgi; + if (ar->cfg_tx_chainmask) + fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + force_sgi = mask->control[band].gi; if (force_sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; @@@ -4857,7 -4342,7 +4857,7 @@@ static void ath10k_sta_rc_update(struc bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: - ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n", + ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", sta->bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; @@@ -4955,25 -4440,22 +4955,25 @@@ static const struct ieee80211_ops ath10 .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, .set_key = ath10k_set_key, + .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, .conf_tx = ath10k_conf_tx, .remain_on_channel = ath10k_remain_on_channel, .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, .set_rts_threshold = ath10k_set_rts_threshold, - .set_frag_threshold = ath10k_set_frag_threshold, .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, .set_antenna = ath10k_set_antenna, .get_antenna = ath10k_get_antenna, - .restart_complete = ath10k_restart_complete, + .reconfig_complete = ath10k_reconfig_complete, .get_survey = ath10k_get_survey, .set_bitrate_mask = ath10k_set_bitrate_mask, .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, .ampdu_action = ath10k_ampdu_action, + .get_et_sset_count = ath10k_debug_get_et_sset_count, + .get_et_stats = ath10k_debug_get_et_stats, + .get_et_strings = ath10k_debug_get_et_strings, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) @@@ -4981,9 -4463,6 +4981,9 @@@ .suspend = ath10k_suspend, .resume = ath10k_resume, #endif +#ifdef CONFIG_MAC80211_DEBUGFS + .sta_add_debugfs = ath10k_sta_add_debugfs, +#endif }; #define RATETAB_ENT(_rate, _rateid, _flags) { \ @@@ -5054,9 -4533,6 +5054,9 @@@ static const struct ieee80211_channel a CHAN5G(165, 5825, 0), }; +/* Note: Be careful if you re-order these. There is code which depends on this + * ordering. 
+ */ static struct ieee80211_rate ath10k_rates[] = { /* CCK */ RATETAB_ENT(10, 0x82, 0), @@@ -5110,10 -4586,6 +5110,10 @@@ static const struct ieee80211_iface_lim .types = BIT(NL80211_IFTYPE_P2P_GO) }, { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + }, + { .max = 7, .types = BIT(NL80211_IFTYPE_AP) }, @@@ -5271,13 -4743,6 +5271,13 @@@ struct ath10k_vif *ath10k_get_arvif(str int ath10k_mac_register(struct ath10k *ar) { + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, + }; struct ieee80211_supported_band *band; struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_ht_cap ht_cap; @@@ -5307,8 -4772,7 +5307,8 @@@ band->bitrates = ath10k_g_rates; band->ht_cap = ht_cap; - /* vht is not supported in 2.4 GHz */ + /* Enable the VHT support at 2.4 GHz */ + band->vht_cap = vht_cap; ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; } @@@ -5336,24 -4800,36 +5336,24 @@@ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - /* TODO: Have to deal with 2x2 chips if/when the come out. */ - ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK; - } else { - ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK; - } - ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) ar->hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | - IEEE80211_HW_SUPPORTS_UAPSD | IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_AP_LINK_PS | - IEEE80211_HW_SPECTRUM_MGMT; - - /* MSDU can have HTT TX fragment pushed in front. The additional 4 - * bytes is used for padding/alignment if necessary. */ - ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4; + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_SW_CRYPTO_CONTROL; ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; @@@ -5373,52 -4849,27 +5373,52 @@@ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; + if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { + ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + + /* Firmware delivers WPS/P2P Probe Requests frames to driver so + * that userspace (e.g. wpa_supplicant/hostapd) can generate + * correct Probe Responses. This is more of a hack advert.. 
+ */ + ar->hw->wiphy->probe_resp_offload |= + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + } + ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing */ ar->hw->queues = 4; - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; - ar->hw->wiphy->n_iface_combinations = - ARRAY_SIZE(ath10k_10x_if_comb); - } else { + switch (ar->wmi.op_version) { + case ATH10K_FW_WMI_OP_VERSION_MAIN: + case ATH10K_FW_WMI_OP_VERSION_TLV: ar->hw->wiphy->iface_combinations = ath10k_if_comb; ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_if_comb); - ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + break; + case ATH10K_FW_WMI_OP_VERSION_10_1: + case ATH10K_FW_WMI_OP_VERSION_10_2: + case ATH10K_FW_WMI_OP_VERSION_10_2_4: + ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; + ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10x_if_comb); + break; + case ATH10K_FW_WMI_OP_VERSION_UNSET: + case ATH10K_FW_WMI_OP_VERSION_MAX: + WARN_ON(1); + ret = -EINVAL; + goto err_free; } ar->hw->netdev_features = NETIF_F_HW_CSUM; @@@ -5440,9 -4891,6 +5440,9 @@@ goto err_free; } + ar->hw->wiphy->cipher_suites = cipher_suites; + ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err(ar, "failed to register ieee80211: %d\n", ret); diff --combined drivers/net/wireless/ath/wcn36xx/smd.c index 69ed39731902,3866b285b3ff..dbd894428be6 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@@ -21,61 -21,6 +21,61 @@@ #include #include "smd.h" +struct wcn36xx_cfg_val { + u32 cfg_id; + u32 value; +}; + +#define WCN36XX_CFG_VAL(id, val) \ +{ \ + .cfg_id = WCN36XX_HAL_CFG_ ## id, \ + .value = val \ +} + +static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = { + WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1), + WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1), + WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0), + WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785), + WCN36XX_CFG_VAL(CAL_PERIOD, 5), + WCN36XX_CFG_VAL(CAL_CONTROL, 1), + WCN36XX_CFG_VAL(PROXIMITY, 0), + WCN36XX_CFG_VAL(NETWORK_DENSITY, 3), + WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000), + WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64), + WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347), + WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6), + WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6), + WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000), + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5), + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10), + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15), + WCN36XX_CFG_VAL(FIXED_RATE, 0), + WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4), + WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0), + WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0), + WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5), + WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1), + WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5), + WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5), + WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40), + WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200), + WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1), + WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1), + WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20), + 
WCN36XX_CFG_VAL(STATS_PERIOD, 10), + WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000), + WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0), + WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128), + WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560), + WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0), + WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1), + WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1), + WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0), + WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10), + WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0), +}; + static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value) { struct wcn36xx_hal_cfg *entry; @@@ -412,10 -357,8 +412,10 @@@ static int wcn36xx_smd_start_rsp(struc int wcn36xx_smd_start(struct wcn36xx *wcn) { - struct wcn36xx_hal_mac_start_req_msg msg_body; + struct wcn36xx_hal_mac_start_req_msg msg_body, *body; int ret = 0; + int i; + size_t len; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ); @@@ -425,22 -368,10 +425,22 @@@ PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf; + len = body->header.len; + + for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) { + ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id, + wcn36xx_cfg_vals[i].value); + if (ret) + goto out; + } + body->header.len = len; + body->params.len = len - sizeof(*body); + wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n", msg_body.params.type); - ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + ret = wcn36xx_smd_send_and_wait(wcn, body->header.len); if (ret) { wcn36xx_err("Sending hal_start failed\n"); goto out; @@@ -1701,7 -1632,7 +1701,7 @@@ int wcn36xx_smd_keep_alive_req(struct w } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) { /* TODO: it also support ARP response type */ } else { - wcn36xx_warn("unknow keep alive packet type %d\n", packet_type); + wcn36xx_warn("unknown keep alive packet type %d\n", packet_type); ret = -EINVAL; goto out; } diff --combined drivers/net/wireless/rtlwifi/rtl8723be/dm.c index 2367e8f47a5b,13430b53f7ca..e77c3a46c94a --- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c @@@ -26,7 -26,6 +26,7 @@@ #include "../wifi.h" #include "../base.h" #include "../pci.h" +#include "../core.h" #include "reg.h" #include "def.h" #include "phy.h" @@@ -212,6 -211,35 +212,6 @@@ void rtl8723be_dm_txpower_track_adjust( (pwr_val << 16) | (pwr_val << 24); } -static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - - dm_digtable->dig_enable_flag = true; - dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); - dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; - dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; - dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - dm_digtable->rx_gain_max = DM_DIG_MAX; - dm_digtable->rx_gain_min = DM_DIG_MIN; - dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; - dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; - dm_digtable->pre_cck_cca_thres = 0xff; - dm_digtable->cur_cck_cca_thres = 0x83; - dm_digtable->forbidden_igi = DM_DIG_MIN; - dm_digtable->large_fa_hit = 0; - dm_digtable->recover_cnt = 0; - dm_digtable->dig_dynamic_min = DM_DIG_MIN; - dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN; - dm_digtable->media_connect_0 = false; - dm_digtable->media_connect_1 = false; - rtlpriv->dm.dm_initialgain_enable = 
true; - dm_digtable->bt30_cur_igi = 0x32; -} - void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@@ -265,10 -293,9 +265,10 @@@ static void rtl8723be_dm_init_dynamic_a void rtl8723be_dm_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; - rtl8723be_dm_diginit(hw); + rtl_dm_diginit(hw, cur_igvalue); rtl8723be_dm_init_rate_adaptive_mask(hw); rtl8723_dm_init_edca_turbo(hw); rtl8723_dm_init_dynamic_bb_powersaving(hw); @@@ -309,7 -336,7 +309,7 @@@ static void rtl8723be_dm_find_minimum_r rtl_dm_dig->min_undec_pwdb_for_dm = rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "AP Ext Port or disconnet PWDB = 0x%x\n", + "AP Ext Port or disconnect PWDB = 0x%x\n", rtl_dm_dig->min_undec_pwdb_for_dm); } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", @@@ -397,7 -424,7 +397,7 @@@ static void rtl8723be_dm_dig(struct iee struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u8 dig_dynamic_min, dig_maxofmin; + u8 dig_min_0, dig_maxofmin; bool bfirstconnect, bfirstdisconnect; u8 dm_dig_max, dm_dig_min; u8 current_igi = dm_digtable->cur_igvalue; @@@ -407,7 -434,7 +407,7 @@@ if (mac->act_scanning) return; - dig_dynamic_min = dm_digtable->dig_dynamic_min; + dig_min_0 = dm_digtable->dig_min_0; bfirstconnect = (mac->link_state >= MAC80211_LINKED) && !dm_digtable->media_connect_0; bfirstdisconnect = (mac->link_state < MAC80211_LINKED) && @@@ -429,20 -456,20 +429,20 @@@ if (rtlpriv->dm.one_entry_only) { offset = 12; if (dm_digtable->rssi_val_min - offset < dm_dig_min) - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; else if (dm_digtable->rssi_val_min - offset > dig_maxofmin) - dig_dynamic_min = dig_maxofmin; + dig_min_0 = dig_maxofmin; else - dig_dynamic_min = + dig_min_0 = dm_digtable->rssi_val_min - offset; } else { - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; } } else { dm_digtable->rx_gain_max = dm_dig_max; - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); } @@@ -470,11 -497,11 +470,11 @@@ } else { if (dm_digtable->large_fa_hit < 3) { if ((dm_digtable->forbidden_igi - 1) < - dig_dynamic_min) { + dig_min_0) { dm_digtable->forbidden_igi = - dig_dynamic_min; + dig_min_0; dm_digtable->rx_gain_min = - dig_dynamic_min; + dig_min_0; } else { dm_digtable->forbidden_igi--; dm_digtable->rx_gain_min = @@@ -525,7 -552,7 +525,7 @@@ rtl8723be_dm_write_dig(hw, current_igi); dm_digtable->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? 
true : false); - dm_digtable->dig_dynamic_min = dig_dynamic_min; + dm_digtable->dig_min_0 = dig_min_0; } static void rtl8723be_dm_false_alarm_counter_statistics( diff --combined drivers/net/wireless/rtlwifi/rtl8821ae/dm.c index 0b2082dc48f1,cb20e23744e3..342678d2ed42 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c @@@ -26,7 -26,6 +26,7 @@@ #include "../wifi.h" #include "../base.h" #include "../pci.h" +#include "../core.h" #include "reg.h" #include "def.h" #include "phy.h" @@@ -520,6 -519,34 +520,6 @@@ void rtl8821ae_dm_initialize_txpower_tr } } -static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *dm_digtable = &rtlpriv->dm_digtable; - - dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); - dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; - dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; - dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - dm_digtable->rx_gain_max = DM_DIG_MAX; - dm_digtable->rx_gain_min = DM_DIG_MIN; - dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; - dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; - dm_digtable->pre_cck_cca_thres = 0xff; - dm_digtable->cur_cck_cca_thres = 0x83; - dm_digtable->forbidden_igi = DM_DIG_MIN; - dm_digtable->large_fa_hit = 0; - dm_digtable->recover_cnt = 0; - dm_digtable->dig_dynamic_min = DM_DIG_MIN; - dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN; - dm_digtable->media_connect_0 = false; - dm_digtable->media_connect_1 = false; - rtlpriv->dm.dm_initialgain_enable = true; - dm_digtable->bt30_cur_igi = 0x32; -} - void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@@ -579,7 -606,6 +579,7 @@@ void rtl8821ae_dm_init(struct ieee80211 { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); spin_lock(&rtlpriv->locks.iqk_lock); rtlphy->lck_inprogress = false; @@@ -587,7 -613,7 +587,7 @@@ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; rtl8821ae_dm_common_info_self_init(hw); - rtl8821ae_dm_diginit(hw); + rtl_dm_diginit(hw, cur_igvalue); rtl8821ae_dm_init_rate_adaptive_mask(hw); rtl8821ae_dm_init_edca_turbo(hw); rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw); @@@ -796,7 -822,7 +796,7 @@@ static void rtl8821ae_dm_dig(struct iee struct dig_t *dm_digtable = &rtlpriv->dm_digtable; struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 dig_dynamic_min; + u8 dig_min_0; u8 dig_max_of_min; bool first_connect, first_disconnect; u8 dm_dig_max, dm_dig_min, offset; @@@ -811,7 -837,7 +811,7 @@@ } /*add by Neil Chen to avoid PSD is processing*/ - dig_dynamic_min = dm_digtable->dig_dynamic_min; + dig_min_0 = dm_digtable->dig_min_0; first_connect = (mac->link_state >= MAC80211_LINKED) && (!dm_digtable->media_connect_0); first_disconnect = (mac->link_state < MAC80211_LINKED) && @@@ -850,30 -876,30 +850,30 @@@ offset = 0; if (dm_digtable->rssi_val_min - offset < dm_dig_min) - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; else if (dm_digtable->rssi_val_min - offset > dig_max_of_min) - dig_dynamic_min = dig_max_of_min; + dig_min_0 = dig_max_of_min; else - dig_dynamic_min = + dig_min_0 = dm_digtable->rssi_val_min - offset; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - "bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n", - dig_dynamic_min); + 
"bOneEntryOnly=TRUE, dig_min_0=0x%x\n", + dig_min_0); } else { - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; } } else { dm_digtable->rx_gain_max = dm_dig_max; - dig_dynamic_min = dm_dig_min; + dig_min_0 = dm_dig_min; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n"); } if (rtlpriv->falsealm_cnt.cnt_all > 10000) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - "Abnornally false alarm case.\n"); + "Abnormally false alarm case.\n"); if (dm_digtable->large_fa_hit != 3) dm_digtable->large_fa_hit++; @@@ -899,11 -925,11 +899,11 @@@ } else { if (dm_digtable->large_fa_hit < 3) { if ((dm_digtable->forbidden_igi - 1) < - dig_dynamic_min) { + dig_min_0) { dm_digtable->forbidden_igi = - dig_dynamic_min; + dig_min_0; dm_digtable->rx_gain_min = - dig_dynamic_min; + dig_min_0; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Normal Case: At Lower Bound\n"); } else { @@@ -998,7 -1024,7 +998,7 @@@ rtl8821ae_dm_write_dig(hw, current_igi); dm_digtable->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? true : false); - dm_digtable->dig_dynamic_min = dig_dynamic_min; + dm_digtable->dig_min_0 = dig_min_0; } static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) @@@ -2052,7 -2078,8 +2052,7 @@@ void rtl8821ae_dm_txpwr_track_set_pwr(s if (rtldm->tx_rate != 0xFF) tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate); - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__); if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/ /*CCK*/ @@@ -2101,7 -2128,7 +2101,7 @@@ if (method == BBSWING) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "===>rtl8812ae_dm_txpwr_track_set_pwr\n"); + "===>%s\n", __func__); if (rf_path == RF90_PATH_A) { final_swing_idx[RF90_PATH_A] = (rtldm->ofdm_index[RF90_PATH_A] > @@@ -2233,8 -2260,7 +2233,8 @@@ void rtl8821ae_dm_txpower_tracking_call rtldm->txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n", + "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n", + __func__, rtldm->swing_idx_cck_base, rtldm->swing_idx_ofdm_base[RF90_PATH_A], rtldm->default_ofdm_index); @@@ -2513,7 -2539,8 +2513,7 @@@ } } - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__); } void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw) diff --combined drivers/pinctrl/nomadik/pinctrl-abx500.c index 1806b24faa14,90a2c194027c..23db4c9ac76c --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@@ -466,7 -466,7 +466,7 @@@ static int abx500_set_mode(struct pinct break; default: - dev_dbg(pct->dev, "unknow alt_setting %d\n", alt_setting); + dev_dbg(pct->dev, "unknown alt_setting %d\n", alt_setting); return -EINVAL; } @@@ -891,13 -891,14 +891,13 @@@ static int abx500_dt_subnode_to_map(str const char *function = NULL; unsigned long *configs; unsigned int nconfigs = 0; - bool has_config = 0; struct property *prop; - const char *group, *gpio_name; - struct device_node *np_config; - ret = of_property_read_string(np, "ste,function", &function); + ret = of_property_read_string(np, "function", &function); if (ret >= 0) { - ret = 
of_property_count_strings(np, "ste,pins"); + const char *group; + + ret = of_property_count_strings(np, "groups"); if (ret < 0) goto exit; @@@ -906,7 -907,7 +906,7 @@@ if (ret < 0) goto exit; - of_property_for_each_string(np, "ste,pins", prop, group) { + of_property_for_each_string(np, "groups", prop, group) { ret = abx500_dt_add_map_mux(map, reserved_maps, num_maps, group, function); if (ret < 0) @@@ -914,12 -915,19 +914,12 @@@ } } - ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs); - if (nconfigs) - has_config = 1; - np_config = of_parse_phandle(np, "ste,config", 0); - if (np_config) { - ret = pinconf_generic_parse_dt_config(np_config, &configs, - &nconfigs); - if (ret) - goto exit; - has_config |= nconfigs; - } - if (has_config) { - ret = of_property_count_strings(np, "ste,pins"); + ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &nconfigs); + if (nconfigs) { + const char *gpio_name; + const char *pin; + + ret = of_property_count_strings(np, "pins"); if (ret < 0) goto exit; @@@ -929,8 -937,8 +929,8 @@@ if (ret < 0) goto exit; - of_property_for_each_string(np, "ste,pins", prop, group) { - gpio_name = abx500_find_pin_name(pctldev, group); + of_property_for_each_string(np, "pins", prop, pin) { + gpio_name = abx500_find_pin_name(pctldev, pin); ret = abx500_dt_add_map_configs(map, reserved_maps, num_maps, gpio_name, configs, 1); @@@ -1104,7 -1112,6 +1104,7 @@@ out static const struct pinconf_ops abx500_pinconf_ops = { .pin_config_get = abx500_pin_config_get, .pin_config_set = abx500_pin_config_set, + .is_generic = true, }; static struct pinctrl_desc abx500_pinctrl_desc = { @@@ -1278,6 -1285,7 +1278,6 @@@ static int abx500_gpio_remove(struct pl static struct platform_driver abx500_gpio_driver = { .driver = { .name = "abx500-gpio", - .owner = THIS_MODULE, .of_match_table = abx500_gpio_match, }, .probe = abx500_gpio_probe, diff --combined drivers/scsi/ch.c index 0045742fab7d,9449f4aab78f..dad959fcf6d8 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@@ -85,7 -85,8 +85,7 @@@ static const char * vendor_labels[CH_TY // module_param_string_array(vendor_labels, NULL, 0444); #define ch_printk(prefix, ch, fmt, a...) \ - sdev_printk(prefix, (ch)->device, "[%s] " fmt, \ - (ch)->name, ##a) + sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a) #define DPRINTK(fmt, arg...) 
\ do { \ @@@ -182,7 -183,7 +182,7 @@@ static int ch_find_errno(struct scsi_se } static int -ch_do_scsi(scsi_changer *ch, unsigned char *cmd, +ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, void *buffer, unsigned buflength, enum dma_data_direction direction) { @@@ -194,13 -195,19 +194,13 @@@ retry: errno = 0; - if (debug) { - DPRINTK("command: "); - __scsi_print_command(cmd); - } - result = scsi_execute_req(ch->device, cmd, direction, buffer, buflength, &sshdr, timeout * HZ, MAX_RETRIES, NULL); - DPRINTK("result: 0x%x\n",result); if (driver_byte(result) & DRIVER_SENSE) { if (debug) - scsi_print_sense_hdr(ch->name, &sshdr); + scsi_print_sense_hdr(ch->device, ch->name, &sshdr); errno = ch_find_errno(&sshdr); switch(sshdr.sense_key) { @@@ -251,8 -258,7 +251,8 @@@ ch_read_element_status(scsi_changer *ch cmd[3] = elem & 0xff; cmd[5] = 1; cmd[9] = 255; - if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) { + if (0 == (result = ch_do_scsi(ch, cmd, 12, + buffer, 256, DMA_FROM_DEVICE))) { if (((buffer[16] << 8) | buffer[17]) != elem) { DPRINTK("asked for element 0x%02x, got 0x%02x\n", elem,(buffer[16] << 8) | buffer[17]); @@@ -282,7 -288,7 +282,7 @@@ ch_init_elem(scsi_changer *ch memset(cmd,0,sizeof(cmd)); cmd[0] = INITIALIZE_ELEMENT_STATUS; cmd[1] = (ch->device->lun & 0x7) << 5; - err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); + err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE); VPRINTK(KERN_INFO, "... finished\n"); return err; } @@@ -304,10 -310,10 +304,10 @@@ ch_readconfig(scsi_changer *ch cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = 0x1d; cmd[4] = 255; - result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); if (0 != result) { cmd[1] |= (1<<3); - result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); } if (0 == result) { ch->firsts[CHET_MT] = @@@ -339,7 -345,7 +339,7 @@@ ch->firsts[CHET_DT], ch->counts[CHET_DT]); } else { - VPRINTK(KERN_INFO, "reading element address assigment page failed!\n"); + VPRINTK(KERN_INFO, "reading element address assignment page failed!\n"); } /* vendor specific element types */ @@@ -432,7 -438,7 +432,7 @@@ ch_position(scsi_changer *ch, u_int tra cmd[4] = (elem >> 8) & 0xff; cmd[5] = elem & 0xff; cmd[8] = rotate ? 1 : 0; - return ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); + return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE); } static int @@@ -453,7 -459,7 +453,7 @@@ ch_move(scsi_changer *ch, u_int trans, cmd[6] = (dest >> 8) & 0xff; cmd[7] = dest & 0xff; cmd[10] = rotate ? 1 : 0; - return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); + return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE); } static int @@@ -479,7 -485,7 +479,7 @@@ ch_exchange(scsi_changer *ch, u_int tra cmd[9] = dest2 & 0xff; cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 
2 : 0); - return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); + return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE); } static void @@@ -529,7 -535,7 +529,7 @@@ ch_set_voltag(scsi_changer *ch, u_int e memcpy(buffer,tag,32); ch_check_voltag(buffer); - result = ch_do_scsi(ch, cmd, buffer, 256, DMA_TO_DEVICE); + result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE); kfree(buffer); return result; } @@@ -610,11 -616,6 +610,11 @@@ static long ch_ioctl(struct file *file int retval; void __user *argp = (void __user *)arg; + retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, + file->f_flags & O_NDELAY); + if (retval) + return retval; + switch (cmd) { case CHIOGPARAMS: { @@@ -765,8 -766,7 +765,8 @@@ ch_cmd[5] = 1; ch_cmd[9] = 255; - result = ch_do_scsi(ch, ch_cmd, buffer, 256, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, ch_cmd, 12, + buffer, 256, DMA_FROM_DEVICE); if (!result) { cge.cge_status = buffer[18]; cge.cge_flags = 0; @@@ -966,9 -966,9 +966,9 @@@ static int ch_remove(struct device *dev } static struct scsi_driver ch_template = { - .owner = THIS_MODULE, .gendrv = { .name = "ch", + .owner = THIS_MODULE, .probe = ch_probe, .remove = ch_remove, }, diff --combined drivers/scsi/qla2xxx/qla_init.c index e59f25bff7ab,17d30adb07d1..5bb57c5282c9 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@@ -3237,6 -3237,8 +3237,6 @@@ qla2x00_reg_remote_port(scsi_qla_host_ struct fc_rport *rport; unsigned long flags; - qla2x00_rport_del(fcport); - rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@@ -5364,7 -5366,7 +5364,7 @@@ qla2x00_load_risc(scsi_qla_host_t *vha blob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_info, vha, 0x0083, - "Fimware image unavailable.\n"); + "Firmware image unavailable.\n"); ql_log(ql_log_info, vha, 0x0084, "Firmware images can be retrieved from: "QLA_FW_URL ".\n"); return QLA_FUNCTION_FAILED; @@@ -5467,7 -5469,7 +5467,7 @@@ qla24xx_load_risc_blob(scsi_qla_host_t blob = qla2x00_request_firmware(vha); if (!blob) { ql_log(ql_log_warn, vha, 0x0090, - "Fimware image unavailable.\n"); + "Firmware image unavailable.\n"); ql_log(ql_log_warn, vha, 0x0091, "Firmware images can be retrieved from: " QLA_FW_URL ".\n"); diff --combined drivers/scsi/qla2xxx/qla_os.c index cce1cbc1a927,92fd85c8b09f..5319b3cb219e --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@@ -236,6 -236,8 +236,6 @@@ static int qla2xxx_eh_target_reset(stru static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); static int qla2xxx_eh_host_reset(struct scsi_cmnd *); -static int qla2x00_change_queue_depth(struct scsi_device *, int, int); -static int qla2x00_change_queue_type(struct scsi_device *, int); static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); static void qla83xx_disable_laser(scsi_qla_host_t *vha); @@@ -257,7 -259,8 +257,7 @@@ struct scsi_host_template qla2xxx_drive .slave_destroy = qla2xxx_slave_destroy, .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, - .change_queue_depth = qla2x00_change_queue_depth, - .change_queue_type = qla2x00_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, @@@ -267,8 -270,6 +267,8 @@@ .shost_attrs = qla2x00_host_attrs, .supported_mode = MODE_INITIATOR, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static struct scsi_transport_template 
*qla2xxx_transport_template = NULL; @@@ -446,11 -447,11 +446,11 @@@ static int qla25xx_setup_mode(struct sc } ha->flags.cpu_affinity_enabled = 1; ql_dbg(ql_dbg_multiq, vha, 0xc007, - "CPU affinity mode enalbed, " + "CPU affinity mode enabled, " "no. of response queues:%d no. of request queues:%d.\n", ha->max_rsp_queues, ha->max_req_queues); ql_dbg(ql_dbg_init, vha, 0x00e9, - "CPU affinity mode enalbed, " + "CPU affinity mode enabled, " "no. of response queues:%d no. of request queues:%d.\n", ha->max_rsp_queues, ha->max_req_queues); } @@@ -734,9 -735,7 +734,9 @@@ qla2xxx_queuecommand(struct Scsi_Host * * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP. */ - if (time_after(jiffies, fcport->retry_delay_timestamp)) + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) fcport->retry_delay_timestamp = 0; else goto qc24_target_busy; @@@ -1406,7 -1405,10 +1406,7 @@@ qla2xxx_slave_configure(struct scsi_dev if (IS_T10_PI_CAPABLE(vha->hw)) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, req->max_q_depth); - else - scsi_deactivate_tcq(sdev, req->max_q_depth); + scsi_change_queue_depth(sdev, req->max_q_depth); return 0; } @@@ -1416,6 -1418,76 +1416,6 @@@ qla2xxx_slave_destroy(struct scsi_devic sdev->hostdata = NULL; } -static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth) -{ - fc_port_t *fcport = (struct fc_port *) sdev->hostdata; - - if (!scsi_track_queue_full(sdev, qdepth)) - return; - - ql_dbg(ql_dbg_io, fcport->vha, 0x3029, - "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n", - sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); -} - -static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) -{ - fc_port_t *fcport = sdev->hostdata; - struct scsi_qla_host *vha = fcport->vha; - struct req_que *req = NULL; - - req = vha->req; - if (!req) - return; - - if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth) - return; - - if (sdev->ordered_tags) - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth); - else - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); - - ql_dbg(ql_dbg_io, vha, 0x302a, - "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n", - sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); -} - -static int -qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - break; - case SCSI_QDEPTH_QFULL: - qla2x00_handle_queue_full(sdev, qdepth); - break; - case SCSI_QDEPTH_RAMP_UP: - qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); - break; - default: - return -EOPNOTSUPP; - } - - return sdev->queue_depth; -} - -static int -qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} - /** * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. 
* @ha: HA context @@@ -4102,7 -4174,7 +4102,7 @@@ qla83xx_schedule_work(scsi_qla_host_t * break; default: ql_log(ql_log_warn, base_vha, 0xb05f, - "Unknow work-code=0x%x.\n", work_code); + "Unknown work-code=0x%x.\n", work_code); } return; @@@ -4702,7 -4774,7 +4702,7 @@@ qla83xx_idc_state_handler(scsi_qla_host break; default: ql_log(ql_log_warn, base_vha, 0xb071, - "Unknow Device State: %x.\n", dev_state); + "Unknown Device State: %x.\n", dev_state); qla83xx_idc_unlock(base_vha, 0); qla8xxx_dev_failed_handler(base_vha); rval = QLA_FUNCTION_FAILED; diff --combined drivers/tty/goldfish.c index 967b2c2b7cf1,d6e332cba3ea..0655fecf8240 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@@ -155,9 -155,9 +155,9 @@@ static struct tty_driver *goldfish_tty_ static int goldfish_tty_console_setup(struct console *co, char *options) { - if ((unsigned)co->index > goldfish_tty_line_count) + if ((unsigned)co->index >= goldfish_tty_line_count) return -ENODEV; - if (goldfish_ttys[co->index].base == 0) + if (!goldfish_ttys[co->index].base) return -ENODEV; return 0; } @@@ -229,7 -229,6 +229,6 @@@ static int goldfish_tty_probe(struct pl { struct goldfish_tty *qtty; int ret = -EINVAL; - int i; struct resource *r; struct device *ttydev; void __iomem *base; @@@ -293,7 -292,6 +292,6 @@@ mutex_unlock(&goldfish_tty_lock); return 0; - tty_unregister_device(goldfish_tty_driver, i); err_tty_register_device_failed: free_irq(irq, pdev); err_request_irq_failed: @@@ -317,7 -315,7 +315,7 @@@ static int goldfish_tty_remove(struct p unregister_console(&qtty->console); tty_unregister_device(goldfish_tty_driver, pdev->id); iounmap(qtty->base); - qtty->base = 0; + qtty->base = NULL; free_irq(qtty->irq, pdev); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) diff --combined drivers/tty/ipwireless/hardware.c index 5c77e1eac4ee,017bfb624e8e..ad7031a4f3c4 --- a/drivers/tty/ipwireless/hardware.c +++ b/drivers/tty/ipwireless/hardware.c @@@ -378,9 -378,9 +378,9 @@@ static void swap_packet_bitfield_to_le( /* * transform bits from aa.bbb.ccc to ccc.bbb.aa */ - ret |= tmp & 0xc0 >> 6; - ret |= tmp & 0x38 >> 1; - ret |= tmp & 0x07 << 5; + ret |= (tmp & 0xc0) >> 6; + ret |= (tmp & 0x38) >> 1; + ret |= (tmp & 0x07) << 5; *data = ret & 0xff; #endif } @@@ -393,9 -393,9 +393,9 @@@ static void swap_packet_bitfield_from_l /* * transform bits from ccc.bbb.aa to aa.bbb.ccc */ - ret |= tmp & 0xe0 >> 5; - ret |= tmp & 0x1c << 1; - ret |= tmp & 0x03 << 6; + ret |= (tmp & 0xe0) >> 5; + ret |= (tmp & 0x1c) << 1; + ret |= (tmp & 0x03) << 6; *data = ret & 0xff; #endif } @@@ -1455,7 -1455,7 +1455,7 @@@ static void __handle_setup_get_version_ return; } - set_RTS(hw, PRIO_SETUP, channel_idx, + ret = set_RTS(hw, PRIO_SETUP, channel_idx, (hw->control_lines [channel_idx] & IPW_CONTROL_LINE_RTS) != 0); if (ret) { diff --combined include/linux/stacktrace.h index 669045ab73f3,1fea0380e97f..0a34489a46b6 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@@ -1,14 -1,10 +1,12 @@@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +#include + struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE - struct task_struct; - struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; @@@ -22,8 -18,6 +20,8 @@@ extern void save_stack_trace_tsk(struc struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); +extern int snprint_stack_trace(char *buf, size_t size, + struct stack_trace *trace, int spaces); #ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void 
save_stack_trace_user(struct stack_trace *trace); @@@ -36,7 -30,6 +34,7 @@@ # define save_stack_trace_tsk(tsk, trace) do { } while (0) # define save_stack_trace_user(trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) +# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) #endif #endif diff --combined init/main.c index 4a6974e67839,b0c705515089..54565bf57beb --- a/init/main.c +++ b/init/main.c @@@ -51,7 -51,7 +51,7 @@@ #include #include #include -#include +#include #include #include #include @@@ -78,8 -78,6 +78,8 @@@ #include #include #include +#include +#include #include #include @@@ -87,6 -85,10 +87,6 @@@ #include #include -#ifdef CONFIG_X86_LOCAL_APIC -#include -#endif - static int kernel_init(void *); extern void init_IRQ(void); @@@ -143,7 -145,7 +143,7 @@@ EXPORT_SYMBOL_GPL(static_key_initialize * rely on the BIOS and skip the reset operation. * * This is useful if kernel is booting in an unreliable environment. - * For ex. kdump situaiton where previous kernel has crashed, BIOS has been + * For ex. kdump situation where previous kernel has crashed, BIOS has been * skipped and devices will be in unknown state. */ unsigned int reset_devices; @@@ -347,6 -349,15 +347,6 @@@ __setup("rdinit=", rdinit_setup) #ifndef CONFIG_SMP static const unsigned int setup_max_cpus = NR_CPUS; -#ifdef CONFIG_X86_LOCAL_APIC -static void __init smp_init(void) -{ - APIC_init_uniprocessor(); -} -#else -#define smp_init() do { } while (0) -#endif - static inline void setup_nr_cpu_ids(void) { } static inline void smp_prepare_cpus(unsigned int maxcpus) { } #endif @@@ -475,10 -486,10 +475,10 @@@ void __init __weak thread_info_cache_in static void __init mm_init(void) { /* - * page_cgroup requires contiguous pages, + * page_ext requires contiguous pages, * bigger than MAX_ORDER unless SPARSEMEM. */ - page_cgroup_init_flatmem(); + page_ext_init_flatmem(); mem_init(); kmem_cache_init(); percpu_init_late(); @@@ -566,10 -577,6 +566,10 @@@ asmlinkage __visible void __init start_ local_irq_disable(); idr_init_cache(); rcu_init(); + + /* trace_printk() and trace points may be used after this */ + trace_init(); + context_tracking_init(); radix_tree_init(); /* init some links before init_ISA_irqs() */ @@@ -620,7 -627,7 +620,7 @@@ initrd_start = 0; } #endif - page_cgroup_init(); + page_ext_init(); debug_objects_mem_init(); kmemleak_init(); setup_per_cpu_pageset(); @@@ -653,9 -660,8 +653,9 @@@ /* rootfs populating might need page-writeback */ page_writeback_init(); proc_root_init(); - cgroup_init(); + nsfs_init(); cpuset_init(); + cgroup_init(); taskstats_init_early(); delayacct_init(); @@@ -953,8 -959,8 +953,8 @@@ static int __ref kernel_init(void *unus ret = run_init_process(execute_command); if (!ret) return 0; - pr_err("Failed to execute %s (error %d). Attempting defaults...\n", - execute_command, ret); + panic("Requested init %s failed (error %d).", + execute_command, ret); } if (!try_to_run_init_process("/sbin/init") || !try_to_run_init_process("/etc/init") || @@@ -1020,11 -1026,8 +1020,11 @@@ static noinline void __init kernel_init * Ok, we have completed the initial bootup, and * we're essentially up and running. Get rid of the * initmem segments and start the user-mode stuff.. 
+ * + * rootfs is available now, try loading the public keys + * and default modules */ - /* rootfs is available now, try loading default modules */ + integrity_load_keys(); load_default_modules(); } diff --combined net/sunrpc/xprtrdma/verbs.c index 124676c13780,3b91aedf439e..e28909fddd30 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@@ -49,7 -49,6 +49,7 @@@ #include #include +#include #include #include "xprt_rdma.h" @@@ -58,12 -57,11 +58,12 @@@ * Globals/Macros */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_TRANS #endif static void rpcrdma_reset_frmrs(struct rpcrdma_ia *); +static void rpcrdma_reset_fmrs(struct rpcrdma_ia *); /* * internal functions @@@ -107,54 -105,16 +107,54 @@@ rpcrdma_run_tasklet(unsigned long data static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL); +static const char * const async_event[] = { + "CQ error", + "QP fatal error", + "QP request error", + "QP access error", + "communication established", + "send queue drained", + "path migration successful", + "path mig error", + "device fatal error", + "port active", + "port error", + "LID change", + "P_key change", + "SM change", + "SRQ error", + "SRQ limit reached", + "last WQE reached", + "client reregister", + "GID change", +}; + +#define ASYNC_MSG(status) \ + ((status) < ARRAY_SIZE(async_event) ? \ + async_event[(status)] : "unknown async error") + +static void +rpcrdma_schedule_tasklet(struct list_head *sched_list) +{ + unsigned long flags; + + spin_lock_irqsave(&rpcrdma_tk_lock_g, flags); + list_splice_tail(sched_list, &rpcrdma_tasklets_g); + spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags); + tasklet_schedule(&rpcrdma_tasklet_g); +} + static void rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context) { struct rpcrdma_ep *ep = context; - dprintk("RPC: %s: QP error %X on device %s ep %p\n", - __func__, event->event, event->device->name, context); + pr_err("RPC: %s: %s on device %s ep %p\n", + __func__, ASYNC_MSG(event->event), + event->device->name, context); if (ep->rep_connected == 1) { ep->rep_connected = -EIO; - ep->rep_func(ep); + rpcrdma_conn_func(ep); wake_up_all(&ep->rep_connect_wait); } } @@@ -164,64 -124,27 +164,64 @@@ rpcrdma_cq_async_error_upcall(struct ib { struct rpcrdma_ep *ep = context; - dprintk("RPC: %s: CQ error %X on device %s ep %p\n", - __func__, event->event, event->device->name, context); + pr_err("RPC: %s: %s on device %s ep %p\n", + __func__, ASYNC_MSG(event->event), + event->device->name, context); if (ep->rep_connected == 1) { ep->rep_connected = -EIO; - ep->rep_func(ep); + rpcrdma_conn_func(ep); wake_up_all(&ep->rep_connect_wait); } } +static const char * const wc_status[] = { + "success", + "local length error", + "local QP operation error", + "local EE context operation error", + "local protection error", + "WR flushed", + "memory management operation error", + "bad response error", + "local access error", + "remote invalid request error", + "remote access error", + "remote operation error", + "transport retry counter exceeded", + "RNR retrycounter exceeded", + "local RDD violation error", + "remove invalid RD request", + "operation aborted", + "invalid EE context number", + "invalid EE context state", + "fatal error", + "response timeout error", + "general error", +}; + +#define COMPLETION_MSG(status) \ + ((status) < ARRAY_SIZE(wc_status) ? 
\ + wc_status[(status)] : "unexpected completion error") + static void rpcrdma_sendcq_process_wc(struct ib_wc *wc) { - struct rpcrdma_mw *frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; + if (likely(wc->status == IB_WC_SUCCESS)) + return; - dprintk("RPC: %s: frmr %p status %X opcode %d\n", - __func__, frmr, wc->status, wc->opcode); + /* WARNING: Only wr_id and status are reliable at this point */ + if (wc->wr_id == 0ULL) { + if (wc->status != IB_WC_WR_FLUSH_ERR) + pr_err("RPC: %s: SEND: %s\n", + __func__, COMPLETION_MSG(wc->status)); + } else { + struct rpcrdma_mw *r; - if (wc->wr_id == 0ULL) - return; - if (wc->status != IB_WC_SUCCESS) - frmr->r.frmr.fr_state = FRMR_IS_STALE; + r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; + r->r.frmr.fr_state = FRMR_IS_STALE; + pr_err("RPC: %s: frmr %p (stale): %s\n", + __func__, r, COMPLETION_MSG(wc->status)); + } } static int @@@ -285,32 -208,33 +285,32 @@@ rpcrdma_recvcq_process_wc(struct ib_wc struct rpcrdma_rep *rep = (struct rpcrdma_rep *)(unsigned long)wc->wr_id; - dprintk("RPC: %s: rep %p status %X opcode %X length %u\n", - __func__, rep, wc->status, wc->opcode, wc->byte_len); + /* WARNING: Only wr_id and status are reliable at this point */ + if (wc->status != IB_WC_SUCCESS) + goto out_fail; - if (wc->status != IB_WC_SUCCESS) { - rep->rr_len = ~0U; - goto out_schedule; - } + /* status == SUCCESS means all fields in wc are trustworthy */ if (wc->opcode != IB_WC_RECV) return; + dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n", + __func__, rep, wc->byte_len); + rep->rr_len = wc->byte_len; ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device, - rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE); - - if (rep->rr_len >= 16) { - struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base; - unsigned int credits = ntohl(p->rm_credit); - - if (credits == 0) - credits = 1; /* don't deadlock */ - else if (credits > rep->rr_buffer->rb_max_requests) - credits = rep->rr_buffer->rb_max_requests; - atomic_set(&rep->rr_buffer->rb_credits, credits); - } + rdmab_addr(rep->rr_rdmabuf), + rep->rr_len, DMA_FROM_DEVICE); + prefetch(rdmab_to_msg(rep->rr_rdmabuf)); out_schedule: list_add_tail(&rep->rr_list, sched_list); + return; +out_fail: + if (wc->status != IB_WC_WR_FLUSH_ERR) + pr_err("RPC: %s: rep %p: %s\n", + __func__, rep, COMPLETION_MSG(wc->status)); + rep->rr_len = ~0U; + goto out_schedule; } static int @@@ -319,6 -243,7 +319,6 @@@ rpcrdma_recvcq_poll(struct ib_cq *cq, s struct list_head sched_list; struct ib_wc *wcs; int budget, count, rc; - unsigned long flags; INIT_LIST_HEAD(&sched_list); budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE; @@@ -336,7 -261,10 +336,7 @@@ rc = 0; out_schedule: - spin_lock_irqsave(&rpcrdma_tk_lock_g, flags); - list_splice_tail(&sched_list, &rpcrdma_tasklets_g); - spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags); - tasklet_schedule(&rpcrdma_tasklet_g); + rpcrdma_schedule_tasklet(&sched_list); return rc; } @@@ -381,18 -309,11 +381,18 @@@ rpcrdma_recvcq_upcall(struct ib_cq *cq static void rpcrdma_flush_cqs(struct rpcrdma_ep *ep) { - rpcrdma_recvcq_upcall(ep->rep_attr.recv_cq, ep); - rpcrdma_sendcq_upcall(ep->rep_attr.send_cq, ep); + struct ib_wc wc; + LIST_HEAD(sched_list); + + while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0) + rpcrdma_recvcq_process_wc(&wc, &sched_list); + if (!list_empty(&sched_list)) + rpcrdma_schedule_tasklet(&sched_list); + while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0) + rpcrdma_sendcq_process_wc(&wc); } -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static 
const char * const conn[] = { "address resolved", "address error", @@@ -423,11 -344,11 +423,11 @@@ rpcrdma_conn_upcall(struct rdma_cm_id * struct rpcrdma_xprt *xprt = id->context; struct rpcrdma_ia *ia = &xprt->rx_ia; struct rpcrdma_ep *ep = &xprt->rx_ep; -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; #endif - struct ib_qp_attr attr; - struct ib_qp_init_attr iattr; + struct ib_qp_attr *attr = &ia->ri_qp_attr; + struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr; int connstate = 0; switch (event->event) { @@@ -450,13 -371,12 +450,13 @@@ break; case RDMA_CM_EVENT_ESTABLISHED: connstate = 1; - ib_query_qp(ia->ri_id->qp, &attr, - IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC, - &iattr); + ib_query_qp(ia->ri_id->qp, attr, + IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC, + iattr); dprintk("RPC: %s: %d responder resources" " (%d initiator)\n", - __func__, attr.max_dest_rd_atomic, attr.max_rd_atomic); + __func__, attr->max_dest_rd_atomic, + attr->max_rd_atomic); goto connected; case RDMA_CM_EVENT_CONNECT_ERROR: connstate = -ENOTCONN; @@@ -473,10 -393,11 +473,10 @@@ case RDMA_CM_EVENT_DEVICE_REMOVAL: connstate = -ENODEV; connected: - atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1); dprintk("RPC: %s: %sconnected\n", __func__, connstate > 0 ? "" : "dis"); ep->rep_connected = connstate; - ep->rep_func(ep); + rpcrdma_conn_func(ep); wake_up_all(&ep->rep_connect_wait); /*FALLTHROUGH*/ default: @@@ -487,9 -408,9 +487,9 @@@ break; } -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) if (connstate == 1) { - int ird = attr.max_dest_rd_atomic; + int ird = attr->max_dest_rd_atomic; int tird = ep->rep_remote_cma.responder_resources; printk(KERN_INFO "rpcrdma: connection to %pI4:%u " "on %s, memreg %d slots %d ird %d%s\n", @@@ -590,8 -511,8 +590,8 @@@ in rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) { int rc, mem_priv; - struct ib_device_attr devattr; struct rpcrdma_ia *ia = &xprt->rx_ia; + struct ib_device_attr *devattr = &ia->ri_devattr; ia->ri_id = rpcrdma_create_id(xprt, ia, addr); if (IS_ERR(ia->ri_id)) { @@@ -607,21 -528,26 +607,21 @@@ goto out2; } - /* - * Query the device to determine if the requested memory - * registration strategy is supported. If it isn't, set the - * strategy to a globally supported model. 
- */ - rc = ib_query_device(ia->ri_id->device, &devattr); + rc = ib_query_device(ia->ri_id->device, devattr); if (rc) { dprintk("RPC: %s: ib_query_device failed %d\n", __func__, rc); - goto out2; + goto out3; } - if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { + if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { ia->ri_have_dma_lkey = 1; ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey; } if (memreg == RPCRDMA_FRMR) { /* Requires both frmr reg and local dma lkey */ - if ((devattr.device_cap_flags & + if ((devattr->device_cap_flags & (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) { dprintk("RPC: %s: FRMR registration " @@@ -631,7 -557,7 +631,7 @@@ /* Mind the ia limit on FRMR page list depth */ ia->ri_max_frmr_depth = min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, - devattr.max_fast_reg_page_list_len); + devattr->max_fast_reg_page_list_len); } } if (memreg == RPCRDMA_MTHCAFMR) { @@@ -669,14 -595,14 +669,14 @@@ "phys register failed with %lX\n", __func__, PTR_ERR(ia->ri_bind_mem)); rc = -ENOMEM; - goto out2; + goto out3; } break; default: printk(KERN_ERR "RPC: Unsupported memory " "registration mode: %d\n", memreg); rc = -ENOMEM; - goto out2; + goto out3; } dprintk("RPC: %s: memory registration strategy is %d\n", __func__, memreg); @@@ -686,10 -612,6 +686,10 @@@ rwlock_init(&ia->ri_qplock); return 0; + +out3: + ib_dealloc_pd(ia->ri_pd); + ia->ri_pd = NULL; out2: rdma_destroy_id(ia->ri_id); ia->ri_id = NULL; @@@ -733,13 -655,20 +733,13 @@@ in rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr devattr; + struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_cq *sendcq, *recvcq; int rc, err; - rc = ib_query_device(ia->ri_id->device, &devattr); - if (rc) { - dprintk("RPC: %s: ib_query_device failed %d\n", - __func__, rc); - return rc; - } - /* check provider's send/recv wr limits */ - if (cdata->max_requests > devattr.max_qp_wr) - cdata->max_requests = devattr.max_qp_wr; + if (cdata->max_requests > devattr->max_qp_wr) + cdata->max_requests = devattr->max_qp_wr; ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; ep->rep_attr.qp_context = ep; @@@ -774,8 -703,8 +774,8 @@@ } ep->rep_attr.cap.max_send_wr *= depth; - if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) { - cdata->max_requests = devattr.max_qp_wr / depth; + if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) { + cdata->max_requests = devattr->max_qp_wr / depth; if (!cdata->max_requests) return -EINVAL; ep->rep_attr.cap.max_send_wr = cdata->max_requests * @@@ -794,14 -723,6 +794,14 @@@ ep->rep_attr.qp_type = IB_QPT_RC; ep->rep_attr.port_num = ~0; + if (cdata->padding) { + ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding, + GFP_KERNEL); + if (IS_ERR(ep->rep_padbuf)) + return PTR_ERR(ep->rep_padbuf); + } else + ep->rep_padbuf = NULL; + dprintk("RPC: %s: requested max: dtos: send %d recv %d; " "iovs: send %d recv %d\n", __func__, @@@ -812,11 -733,10 +812,11 @@@ /* set trigger for requesting send completion */ ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1; - if (ep->rep_cqinit <= 2) + if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS) + ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS; + else if (ep->rep_cqinit <= 2) ep->rep_cqinit = 0; INIT_CQCOUNT(ep); - ep->rep_ia = ia; init_waitqueue_head(&ep->rep_connect_wait); INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); @@@ -866,11 -786,10 +866,11 @@@ /* Client offers RDMA Read 
but does not initiate */ ep->rep_remote_cma.initiator_depth = 0; - if (devattr.max_qp_rd_atom > 32) /* arbitrary but <= 255 */ + if (devattr->max_qp_rd_atom > 32) /* arbitrary but <= 255 */ ep->rep_remote_cma.responder_resources = 32; else - ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom; + ep->rep_remote_cma.responder_resources = + devattr->max_qp_rd_atom; ep->rep_remote_cma.retry_count = 7; ep->rep_remote_cma.flow_control = 0; @@@ -884,7 -803,6 +884,7 @@@ out2 dprintk("RPC: %s: ib_destroy_cq returned %i\n", __func__, err); out1: + rpcrdma_free_regbuf(ia, ep->rep_padbuf); return rc; } @@@ -911,7 -829,11 +911,7 @@@ rpcrdma_ep_destroy(struct rpcrdma_ep *e ia->ri_id->qp = NULL; } - /* padding - could be done in rpcrdma_buffer_destroy... */ - if (ep->rep_pad_mr) { - rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad); - ep->rep_pad_mr = NULL; - } + rpcrdma_free_regbuf(ia, ep->rep_padbuf); rpcrdma_clean_cq(ep->rep_attr.recv_cq); rc = ib_destroy_cq(ep->rep_attr.recv_cq); @@@ -944,19 -866,8 +944,19 @@@ retry rpcrdma_ep_disconnect(ep, ia); rpcrdma_flush_cqs(ep); - if (ia->ri_memreg_strategy == RPCRDMA_FRMR) + switch (ia->ri_memreg_strategy) { + case RPCRDMA_FRMR: rpcrdma_reset_frmrs(ia); + break; + case RPCRDMA_MTHCAFMR: + rpcrdma_reset_fmrs(ia); + break; + case RPCRDMA_ALLPHYSICAL: + break; + default: + rc = -EIO; + goto out; + } xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); id = rpcrdma_create_id(xprt, ia, @@@ -1081,48 -992,6 +1081,48 @@@ rpcrdma_ep_disconnect(struct rpcrdma_e } } +static struct rpcrdma_req * +rpcrdma_create_req(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_req *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (req == NULL) + return ERR_PTR(-ENOMEM); + + req->rl_buffer = &r_xprt->rx_buf; + return req; +} + +static struct rpcrdma_rep * +rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt) +{ + struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_rep *rep; + int rc; + + rc = -ENOMEM; + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (rep == NULL) + goto out; + + rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize, + GFP_KERNEL); + if (IS_ERR(rep->rr_rdmabuf)) { + rc = PTR_ERR(rep->rr_rdmabuf); + goto out_free; + } + + rep->rr_buffer = &r_xprt->rx_buf; + return rep; + +out_free: + kfree(rep); +out: + return ERR_PTR(rc); +} + static int rpcrdma_init_fmrs(struct rpcrdma_ia *ia, struct rpcrdma_buffer *buf) { @@@ -1136,7 -1005,7 +1136,7 @@@ int i, rc; i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i); + dprintk("RPC: %s: initializing %d FMRs\n", __func__, i); while (i--) { r = kzalloc(sizeof(*r), GFP_KERNEL); @@@ -1169,7 -1038,7 +1169,7 @@@ rpcrdma_init_frmrs(struct rpcrdma_ia *i int i, rc; i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i); + dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i); while (i--) { r = kzalloc(sizeof(*r), GFP_KERNEL); @@@ -1209,26 -1078,27 +1209,26 @@@ out_free } int -rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep, - struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata) +rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) { + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; char *p; - size_t len, rlen, wlen; + size_t len; int i, rc; buf->rb_max_requests = cdata->max_requests; 
spin_lock_init(&buf->rb_lock); - atomic_set(&buf->rb_credits, 1); /* Need to allocate: * 1. arrays for send and recv pointers * 2. arrays of struct rpcrdma_req to fill in pointers * 3. array of struct rpcrdma_rep for replies - * 4. padding, if any * Send/recv buffers in req/rep need to be registered */ len = buf->rb_max_requests * (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *)); - len += cdata->padding; p = kzalloc(len, GFP_KERNEL); if (p == NULL) { @@@ -1244,6 -1114,17 +1244,6 @@@ buf->rb_recv_bufs = (struct rpcrdma_rep **) p; p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; - /* - * Register the zeroed pad buffer, if any. - */ - if (cdata->padding) { - rc = rpcrdma_register_internal(ia, p, cdata->padding, - &ep->rep_pad_mr, &ep->rep_pad); - if (rc) - goto out; - } - p += cdata->padding; - INIT_LIST_HEAD(&buf->rb_mws); INIT_LIST_HEAD(&buf->rb_all); switch (ia->ri_memreg_strategy) { @@@ -1261,56 -1142,68 +1261,56 @@@ break; } - /* - * Allocate/init the request/reply buffers. Doing this - * using kmalloc for now -- one for each buf. - */ - wlen = 1 << fls(cdata->inline_wsize + sizeof(struct rpcrdma_req)); - rlen = 1 << fls(cdata->inline_rsize + sizeof(struct rpcrdma_rep)); - dprintk("RPC: %s: wlen = %zu, rlen = %zu\n", - __func__, wlen, rlen); - for (i = 0; i < buf->rb_max_requests; i++) { struct rpcrdma_req *req; struct rpcrdma_rep *rep; - req = kmalloc(wlen, GFP_KERNEL); - if (req == NULL) { + req = rpcrdma_create_req(r_xprt); + if (IS_ERR(req)) { dprintk("RPC: %s: request buffer %d alloc" " failed\n", __func__, i); - rc = -ENOMEM; + rc = PTR_ERR(req); goto out; } - memset(req, 0, sizeof(struct rpcrdma_req)); buf->rb_send_bufs[i] = req; - buf->rb_send_bufs[i]->rl_buffer = buf; - - rc = rpcrdma_register_internal(ia, req->rl_base, - wlen - offsetof(struct rpcrdma_req, rl_base), - &buf->rb_send_bufs[i]->rl_handle, - &buf->rb_send_bufs[i]->rl_iov); - if (rc) - goto out; - buf->rb_send_bufs[i]->rl_size = wlen - - sizeof(struct rpcrdma_req); - - rep = kmalloc(rlen, GFP_KERNEL); - if (rep == NULL) { + rep = rpcrdma_create_rep(r_xprt); + if (IS_ERR(rep)) { dprintk("RPC: %s: reply buffer %d alloc failed\n", __func__, i); - rc = -ENOMEM; + rc = PTR_ERR(rep); goto out; } - memset(rep, 0, sizeof(struct rpcrdma_rep)); buf->rb_recv_bufs[i] = rep; - buf->rb_recv_bufs[i]->rr_buffer = buf; - - rc = rpcrdma_register_internal(ia, rep->rr_base, - rlen - offsetof(struct rpcrdma_rep, rr_base), - &buf->rb_recv_bufs[i]->rr_handle, - &buf->rb_recv_bufs[i]->rr_iov); - if (rc) - goto out; - } - dprintk("RPC: %s: max_requests %d\n", - __func__, buf->rb_max_requests); - /* done */ + return 0; out: rpcrdma_buffer_destroy(buf); return rc; } +static void +rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep) +{ + if (!rep) + return; + + rpcrdma_free_regbuf(ia, rep->rr_rdmabuf); + kfree(rep); +} + +static void +rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req) +{ + if (!req) + return; + + rpcrdma_free_regbuf(ia, req->rl_sendbuf); + rpcrdma_free_regbuf(ia, req->rl_rdmabuf); + kfree(req); +} + static void rpcrdma_destroy_fmrs(struct rpcrdma_buffer *buf) { @@@ -1366,10 -1259,18 +1366,10 @@@ rpcrdma_buffer_destroy(struct rpcrdma_b dprintk("RPC: %s: entering\n", __func__); for (i = 0; i < buf->rb_max_requests; i++) { - if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) { - rpcrdma_deregister_internal(ia, - buf->rb_recv_bufs[i]->rr_handle, - &buf->rb_recv_bufs[i]->rr_iov); - kfree(buf->rb_recv_bufs[i]); - } - if (buf->rb_send_bufs && buf->rb_send_bufs[i]) { - 
rpcrdma_deregister_internal(ia, - buf->rb_send_bufs[i]->rl_handle, - &buf->rb_send_bufs[i]->rl_iov); - kfree(buf->rb_send_bufs[i]); - } + if (buf->rb_recv_bufs) + rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]); + if (buf->rb_send_bufs) + rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]); } switch (ia->ri_memreg_strategy) { @@@ -1386,34 -1287,6 +1386,34 @@@ kfree(buf->rb_pool); } +/* After a disconnect, unmap all FMRs. + * + * This is invoked only in the transport connect worker in order + * to serialize with rpcrdma_register_fmr_external(). + */ +static void +rpcrdma_reset_fmrs(struct rpcrdma_ia *ia) +{ + struct rpcrdma_xprt *r_xprt = + container_of(ia, struct rpcrdma_xprt, rx_ia); + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct list_head *pos; + struct rpcrdma_mw *r; + LIST_HEAD(l); + int rc; + + list_for_each(pos, &buf->rb_all) { + r = list_entry(pos, struct rpcrdma_mw, mw_all); + + INIT_LIST_HEAD(&l); + list_add(&r->r.fmr->list, &l); + rc = ib_unmap_fmr(&l); + if (rc) + dprintk("RPC: %s: ib_unmap_fmr failed %i\n", + __func__, rc); + } +} + /* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in * an unusable state. Find FRMRs in this state and dereg / reg * each. FRMRs that are VALID and attached to an rpcrdma_req are @@@ -1493,8 -1366,8 +1493,8 @@@ rpcrdma_buffer_put_mrs(struct rpcrdma_r int i; for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++) - rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf); - rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf); + rpcrdma_buffer_put_mr(&seg->rl_mw, buf); + rpcrdma_buffer_put_mr(&seg1->rl_mw, buf); } static void @@@ -1580,7 -1453,7 +1580,7 @@@ rpcrdma_buffer_get_frmrs(struct rpcrdma list_add(&r->mw_list, stale); continue; } - req->rl_segments[i].mr_chunk.rl_mw = r; + req->rl_segments[i].rl_mw = r; if (unlikely(i-- == 0)) return req; /* Success */ } @@@ -1602,7 -1475,7 +1602,7 @@@ rpcrdma_buffer_get_fmrs(struct rpcrdma_ r = list_entry(buf->rb_mws.next, struct rpcrdma_mw, mw_list); list_del(&r->mw_list); - req->rl_segments[i].mr_chunk.rl_mw = r; + req->rl_segments[i].rl_mw = r; if (unlikely(i-- == 0)) return req; /* Success */ } @@@ -1701,6 -1574,8 +1701,6 @@@ rpcrdma_recv_buffer_get(struct rpcrdma_ struct rpcrdma_buffer *buffers = req->rl_buffer; unsigned long flags; - if (req->rl_iov.length == 0) /* special case xprt_rdma_allocate() */ - buffers = ((struct rpcrdma_req *) buffers)->rl_buffer; spin_lock_irqsave(&buffers->rb_lock, flags); if (buffers->rb_recv_index < buffers->rb_max_requests) { req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index]; @@@ -1729,7 -1604,7 +1729,7 @@@ rpcrdma_recv_buffer_put(struct rpcrdma_ * Wrappers for internal-use kmalloc memory registration, used by buffer code. */ -int +static int rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, struct ib_mr **mrp, struct ib_sge *iov) { @@@ -1780,7 -1655,7 +1780,7 @@@ return rc; } -int +static int rpcrdma_deregister_internal(struct rpcrdma_ia *ia, struct ib_mr *mr, struct ib_sge *iov) { @@@ -1798,61 -1673,6 +1798,61 @@@ return rc; } +/** + * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers + * @ia: controlling rpcrdma_ia + * @size: size of buffer to be allocated, in bytes + * @flags: GFP flags + * + * Returns pointer to private header of an area of internally + * registered memory, or an ERR_PTR. The registered buffer follows + * the end of the private header. + * + * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for + * receiving the payload of RDMA RECV operations. 
regbufs are not + * used for RDMA READ/WRITE operations, thus are registered only for + * LOCAL access. + */ +struct rpcrdma_regbuf * +rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) +{ + struct rpcrdma_regbuf *rb; + int rc; + + rc = -ENOMEM; + rb = kmalloc(sizeof(*rb) + size, flags); + if (rb == NULL) + goto out; + + rb->rg_size = size; + rb->rg_owner = NULL; + rc = rpcrdma_register_internal(ia, rb->rg_base, size, + &rb->rg_mr, &rb->rg_iov); + if (rc) + goto out_free; + + return rb; + +out_free: + kfree(rb); +out: + return ERR_PTR(rc); +} + +/** + * rpcrdma_free_regbuf - deregister and free registered buffer + * @ia: controlling rpcrdma_ia + * @rb: regbuf to be deregistered and freed + */ +void +rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb) +{ + if (rb) { + rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov); + kfree(rb); + } +} + /* * Wrappers for chunk registration, shared by read/write chunk code. */ @@@ -1895,7 -1715,7 +1895,7 @@@ rpcrdma_register_frmr_external(struct r struct rpcrdma_xprt *r_xprt) { struct rpcrdma_mr_seg *seg1 = seg; - struct rpcrdma_mw *mw = seg1->mr_chunk.rl_mw; + struct rpcrdma_mw *mw = seg1->rl_mw; struct rpcrdma_frmr *frmr = &mw->r.frmr; struct ib_mr *mr = frmr->fr_mr; struct ib_send_wr fastreg_wr, *bad_wr; @@@ -1984,12 -1804,12 +1984,12 @@@ rpcrdma_deregister_frmr_external(struc struct ib_send_wr invalidate_wr, *bad_wr; int rc; - seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; + seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; memset(&invalidate_wr, 0, sizeof invalidate_wr); - invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw; + invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; invalidate_wr.opcode = IB_WR_LOCAL_INV; - invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; + invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; DECR_CQCOUNT(&r_xprt->rx_ep); read_lock(&ia->ri_qplock); @@@ -1999,7 -1819,7 +1999,7 @@@ read_unlock(&ia->ri_qplock); if (rc) { /* Force rpcrdma_buffer_get() to retry */ - seg1->mr_chunk.rl_mw->r.frmr.fr_state = FRMR_IS_STALE; + seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; dprintk("RPC: %s: failed ib_post_send for invalidate," " status %i\n", __func__, rc); } @@@ -2031,7 -1851,8 +2031,7 @@@ rpcrdma_register_fmr_external(struct rp offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) break; } - rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr, - physaddrs, i, seg1->mr_dma); + rc = ib_map_phys_fmr(seg1->rl_mw->r.fmr, physaddrs, i, seg1->mr_dma); if (rc) { dprintk("RPC: %s: failed ib_map_phys_fmr " "%u@0x%llx+%i (%d)... 
status %i\n", __func__, @@@ -2040,7 -1861,7 +2040,7 @@@ while (i--) rpcrdma_unmap_one(ia, --seg); } else { - seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey; + seg1->mr_rkey = seg1->rl_mw->r.fmr->rkey; seg1->mr_base = seg1->mr_dma + pageoff; seg1->mr_nsegs = i; seg1->mr_len = len; @@@ -2057,7 -1878,7 +2057,7 @@@ rpcrdma_deregister_fmr_external(struct LIST_HEAD(l); int rc; - list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l); + list_add(&seg1->rl_mw->r.fmr->list, &l); rc = ib_unmap_fmr(&l); read_lock(&ia->ri_qplock); while (seg1->mr_nsegs--) @@@ -2097,10 -1918,10 +2097,10 @@@ rpcrdma_register_external(struct rpcrdm break; default: - return -1; + return -EIO; } if (rc) - return -1; + return rc; return nsegs; } @@@ -2199,13 -2020,11 +2199,13 @@@ rpcrdma_ep_post_recv(struct rpcrdma_ia recv_wr.next = NULL; recv_wr.wr_id = (u64) (unsigned long) rep; - recv_wr.sg_list = &rep->rr_iov; + recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; recv_wr.num_sge = 1; ib_dma_sync_single_for_cpu(ia->ri_id->device, - rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL); + rdmab_addr(rep->rr_rdmabuf), + rdmab_length(rep->rr_rdmabuf), + DMA_BIDIRECTIONAL); rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); diff --combined sound/soc/soc-core.c index e5c990889dcc,69e655e1ce11..10f0886e78ec --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@@ -34,6 -34,9 +34,6 @@@ #include #include #include -#include -#include -#include #include #include #include @@@ -66,6 -69,16 +66,6 @@@ static int pmdown_time = 5000 module_param(pmdown_time, int, 0); MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)"); -struct snd_ac97_reset_cfg { - struct pinctrl *pctl; - struct pinctrl_state *pstate_reset; - struct pinctrl_state *pstate_warm_reset; - struct pinctrl_state *pstate_run; - int gpio_sdata; - int gpio_sync; - int gpio_reset; -}; - /* returns the minimum number of bytes needed to represent * a particular given value */ static int min_bytes_needed(unsigned long val) @@@ -191,39 -204,6 +191,39 @@@ static ssize_t pmdown_time_set(struct d static DEVICE_ATTR(pmdown_time, 0644, pmdown_time_show, pmdown_time_set); +static struct attribute *soc_dev_attrs[] = { + &dev_attr_codec_reg.attr, + &dev_attr_pmdown_time.attr, + NULL +}; + +static umode_t soc_dev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev); + + if (attr == &dev_attr_pmdown_time.attr) + return attr->mode; /* always visible */ + return rtd->codec ? 
attr->mode : 0; /* enabled only with codec */ +} + +static const struct attribute_group soc_dapm_dev_group = { + .attrs = soc_dapm_dev_attrs, + .is_visible = soc_dev_attr_is_visible, +}; + +static const struct attribute_group soc_dev_roup = { + .attrs = soc_dev_attrs, + .is_visible = soc_dev_attr_is_visible, +}; + +static const struct attribute_group *soc_dev_attr_groups[] = { + &soc_dapm_dev_group, + &soc_dev_roup, + NULL +}; + #ifdef CONFIG_DEBUG_FS static ssize_t codec_reg_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@@ -329,6 -309,9 +329,6 @@@ static void soc_init_codec_debugfs(stru { struct snd_soc_codec *codec = snd_soc_component_to_codec(component); - debugfs_create_bool("cache_sync", 0444, codec->component.debugfs_root, - &codec->cache_sync); - codec->debugfs_reg = debugfs_create_file("codec_reg", 0644, codec->component.debugfs_root, codec, &codec_reg_fops); @@@ -347,8 -330,6 +347,8 @@@ static ssize_t codec_list_read_file(str if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(codec, &codec_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", codec->component.name); @@@ -360,8 -341,6 +360,8 @@@ } } + mutex_unlock(&client_mutex); + if (ret >= 0) ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); @@@ -386,8 -365,6 +386,8 @@@ static ssize_t dai_list_read_file(struc if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(component, &component_list, list) { list_for_each_entry(dai, &component->dai_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", @@@ -401,8 -378,6 +401,8 @@@ } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@@ -426,8 -401,6 +426,8 @@@ static ssize_t platform_list_read_file( if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(platform, &platform_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", platform->component.name); @@@ -439,8 -412,6 +439,8 @@@ } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@@ -528,6 -499,40 +528,6 @@@ struct snd_soc_pcm_runtime *snd_soc_get } EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime); -#ifdef CONFIG_SND_SOC_AC97_BUS -/* unregister ac97 codec */ -static int soc_ac97_dev_unregister(struct snd_soc_codec *codec) -{ - if (codec->ac97->dev.bus) - device_unregister(&codec->ac97->dev); - return 0; -} - -/* stop no dev release warning */ -static void soc_ac97_device_release(struct device *dev){} - -/* register ac97 codec to bus */ -static int soc_ac97_dev_register(struct snd_soc_codec *codec) -{ - int err; - - codec->ac97->dev.bus = &ac97_bus_type; - codec->ac97->dev.parent = codec->component.card->dev; - codec->ac97->dev.release = soc_ac97_device_release; - - dev_set_name(&codec->ac97->dev, "%d-%d:%s", - codec->component.card->snd_card->number, 0, - codec->component.name); - err = device_register(&codec->ac97->dev); - if (err < 0) { - dev_err(codec->dev, "ASoC: Can't register ac97 bus\n"); - codec->ac97->dev.bus = NULL; - return err; - } - return 0; -} -#endif - static void codec2codec_close_delayed_work(struct work_struct *work) { /* Currently nothing to do for c2c links @@@ -587,12 -592,17 +587,12 @@@ int snd_soc_suspend(struct device *dev for (i = 0; i < card->num_rtd; i++) { struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; - struct snd_soc_platform *platform = card->rtd[i].platform; if (card->rtd[i].dai_link->ignore_suspend) continue; - if 
(cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control) + if (cpu_dai->driver->suspend && !cpu_dai->driver->bus_control) cpu_dai->driver->suspend(cpu_dai); - if (platform->driver->suspend && !platform->suspended) { - platform->driver->suspend(cpu_dai); - platform->suspended = 1; - } } /* close any waiting streams and save state */ @@@ -619,8 -629,8 +619,8 @@@ SND_SOC_DAPM_STREAM_SUSPEND); } - /* Recheck all analogue paths too */ - dapm_mark_io_dirty(&card->dapm); + /* Recheck all endpoints too, their state is affected by suspend */ + dapm_mark_endpoints_dirty(card); snd_soc_dapm_sync(&card->dapm); /* suspend all CODECs */ @@@ -646,6 -656,7 +646,6 @@@ if (codec->driver->suspend) codec->driver->suspend(codec); codec->suspended = 1; - codec->cache_sync = 1; if (codec->component.regmap) regcache_mark_dirty(codec->component.regmap); /* deactivate pins to sleep state */ @@@ -665,7 -676,7 +665,7 @@@ if (card->rtd[i].dai_link->ignore_suspend) continue; - if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control) + if (cpu_dai->driver->suspend && cpu_dai->driver->bus_control) cpu_dai->driver->suspend(cpu_dai); /* deactivate pins to sleep state */ @@@ -701,14 -712,14 +701,14 @@@ static void soc_resume_deferred(struct if (card->resume_pre) card->resume_pre(card); - /* resume AC97 DAIs */ + /* resume control bus DAIs */ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; if (card->rtd[i].dai_link->ignore_suspend) continue; - if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control) + if (cpu_dai->driver->resume && cpu_dai->driver->bus_control) cpu_dai->driver->resume(cpu_dai); } @@@ -764,12 -775,17 +764,12 @@@ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; - struct snd_soc_platform *platform = card->rtd[i].platform; if (card->rtd[i].dai_link->ignore_suspend) continue; - if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control) + if (cpu_dai->driver->resume && !cpu_dai->driver->bus_control) cpu_dai->driver->resume(cpu_dai); - if (platform->driver->resume && platform->suspended) { - platform->driver->resume(cpu_dai); - platform->suspended = 0; - } } if (card->resume_post) @@@ -780,8 -796,8 +780,8 @@@ /* userspace can access us now we are back as we were before */ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0); - /* Recheck all analogue paths too */ - dapm_mark_io_dirty(&card->dapm); + /* Recheck all endpoints too, their state is affected by suspend */ + dapm_mark_endpoints_dirty(card); snd_soc_dapm_sync(&card->dapm); } @@@ -789,8 -805,7 +789,8 @@@ int snd_soc_resume(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); - int i, ac97_control = 0; + bool bus_control = false; + int i; /* If the card is not initialized yet there is nothing to do */ if (!card->instantiated) @@@ -813,18 -828,17 +813,18 @@@ } } - /* AC97 devices might have other drivers hanging off them so - * need to resume immediately. Other drivers don't have that - * problem and may take a substantial amount of time to resume + /* + * DAIs that also act as the control bus master might have other drivers + * hanging off them so need to resume immediately. Other drivers don't + * have that problem and may take a substantial amount of time to resume * due to I/O costs and anti-pop so handle them out of line. 
*/ for (i = 0; i < card->num_rtd; i++) { struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; - ac97_control |= cpu_dai->driver->ac97_control; + bus_control |= cpu_dai->driver->bus_control; } - if (ac97_control) { - dev_dbg(dev, "ASoC: Resuming AC97 immediately\n"); + if (bus_control) { + dev_dbg(dev, "ASoC: Resuming control bus master immediately\n"); soc_resume_deferred(&card->deferred_resume_work); } else { dev_dbg(dev, "ASoC: Scheduling resume work\n"); @@@ -848,8 -862,6 +848,8 @@@ static struct snd_soc_component *soc_fi { struct snd_soc_component *component; + lockdep_assert_held(&client_mutex); + list_for_each_entry(component, &component_list, list) { if (of_node) { if (component->dev->of_node == of_node) @@@ -868,13 -880,11 +868,13 @@@ static struct snd_soc_dai *snd_soc_find struct snd_soc_component *component; struct snd_soc_dai *dai; + lockdep_assert_held(&client_mutex); + /* Find CPU DAI from registered DAIs*/ list_for_each_entry(component, &component_list, list) { if (dlc->of_node && component->dev->of_node != dlc->of_node) continue; - if (dlc->name && strcmp(dev_name(component->dev), dlc->name)) + if (dlc->name && strcmp(component->name, dlc->name)) continue; list_for_each_entry(dai, &component->dai_list, list) { if (dlc->dai_name && strcmp(dai->name, dlc->dai_name)) @@@ -998,6 -1008,8 +998,6 @@@ static void soc_remove_link_dais(struc /* unregister the rtd device */ if (rtd->dev_registered) { - device_remove_file(rtd->dev, &dev_attr_pmdown_time); - device_remove_file(rtd->dev, &dev_attr_codec_reg); device_unregister(rtd->dev); rtd->dev_registered = 0; } @@@ -1167,7 -1179,6 +1167,7 @@@ static int soc_post_component_init(stru device_initialize(rtd->dev); rtd->dev->parent = rtd->card->dev; rtd->dev->release = rtd_release; + rtd->dev->groups = soc_dev_attr_groups; dev_set_name(rtd->dev, "%s", name); dev_set_drvdata(rtd->dev, rtd); mutex_init(&rtd->pcm_mutex); @@@ -1184,6 -1195,23 +1184,6 @@@ return ret; } rtd->dev_registered = 1; - - if (rtd->codec) { - /* add DAPM sysfs entries for this codec */ - ret = snd_soc_dapm_sys_add(rtd->dev); - if (ret < 0) - dev_err(rtd->dev, - "ASoC: failed to add codec dapm sysfs entries: %d\n", - ret); - - /* add codec sysfs entries */ - ret = device_create_file(rtd->dev, &dev_attr_codec_reg); - if (ret < 0) - dev_err(rtd->dev, - "ASoC: failed to add codec sysfs files: %d\n", - ret); - } - return 0; } @@@ -1223,22 -1251,25 +1223,22 @@@ static int soc_probe_link_components(st return 0; } -static int soc_probe_codec_dai(struct snd_soc_card *card, - struct snd_soc_dai *codec_dai, - int order) +static int soc_probe_dai(struct snd_soc_dai *dai, int order) { int ret; - if (!codec_dai->probed && codec_dai->driver->probe_order == order) { - if (codec_dai->driver->probe) { - ret = codec_dai->driver->probe(codec_dai); + if (!dai->probed && dai->driver->probe_order == order) { + if (dai->driver->probe) { + ret = dai->driver->probe(dai); if (ret < 0) { - dev_err(codec_dai->dev, - "ASoC: failed to probe CODEC DAI %s: %d\n", - codec_dai->name, ret); + dev_err(dai->dev, + "ASoC: failed to probe DAI %s: %d\n", + dai->name, ret); return ret; } } - /* mark codec_dai as probed and add to card dai list */ - codec_dai->probed = 1; + dai->probed = 1; } return 0; @@@ -1288,22 -1319,40 +1288,22 @@@ static int soc_probe_link_dais(struct s { struct snd_soc_dai_link *dai_link = &card->dai_link[num]; struct snd_soc_pcm_runtime *rtd = &card->rtd[num]; - struct snd_soc_platform *platform = rtd->platform; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int i, ret; dev_dbg(card->dev, 
"ASoC: probe %s dai link %d late %d\n", card->name, num, order); - /* config components */ - cpu_dai->platform = platform; - cpu_dai->card = card; - for (i = 0; i < rtd->num_codecs; i++) - rtd->codec_dais[i]->card = card; - /* set default power off timeout */ rtd->pmdown_time = pmdown_time; - /* probe the cpu_dai */ - if (!cpu_dai->probed && - cpu_dai->driver->probe_order == order) { - if (cpu_dai->driver->probe) { - ret = cpu_dai->driver->probe(cpu_dai); - if (ret < 0) { - dev_err(cpu_dai->dev, - "ASoC: failed to probe CPU DAI %s: %d\n", - cpu_dai->name, ret); - return ret; - } - } - cpu_dai->probed = 1; - } + ret = soc_probe_dai(cpu_dai, order); + if (ret) + return ret; /* probe the CODEC DAI */ for (i = 0; i < rtd->num_codecs; i++) { - ret = soc_probe_codec_dai(card, rtd->codec_dais[i], order); + ret = soc_probe_dai(rtd->codec_dais[i], order); if (ret) return ret; } @@@ -1339,6 -1388,11 +1339,6 @@@ } #endif - ret = device_create_file(rtd->dev, &dev_attr_pmdown_time); - if (ret < 0) - dev_warn(rtd->dev, "ASoC: failed to add pmdown_time sysfs: %d\n", - ret); - if (cpu_dai->driver->compress_dai) { /*create compress_device"*/ ret = soc_new_compress(rtd, num); @@@ -1368,9 -1422,84 +1368,9 @@@ } } - /* add platform data for AC97 devices */ - for (i = 0; i < rtd->num_codecs; i++) { - if (rtd->codec_dais[i]->driver->ac97_control) - snd_ac97_dev_add_pdata(rtd->codec_dais[i]->codec->ac97, - rtd->cpu_dai->ac97_pdata); - } - - return 0; -} - -#ifdef CONFIG_SND_SOC_AC97_BUS -static int soc_register_ac97_codec(struct snd_soc_codec *codec, - struct snd_soc_dai *codec_dai) -{ - int ret; - - /* Only instantiate AC97 if not already done by the adaptor - * for the generic AC97 subsystem. - */ - if (codec_dai->driver->ac97_control && !codec->ac97_registered) { - /* - * It is possible that the AC97 device is already registered to - * the device subsystem. This happens when the device is created - * via snd_ac97_mixer(). Currently only SoC codec that does so - * is the generic AC97 glue but others migh emerge. - * - * In those cases we don't try to register the device again. 
- */ - if (!codec->ac97_created) - return 0; - - ret = soc_ac97_dev_register(codec); - if (ret < 0) { - dev_err(codec->dev, - "ASoC: AC97 device register failed: %d\n", ret); - return ret; - } - - codec->ac97_registered = 1; - } - return 0; -} - -static void soc_unregister_ac97_codec(struct snd_soc_codec *codec) -{ - if (codec->ac97_registered) { - soc_ac97_dev_unregister(codec); - codec->ac97_registered = 0; - } -} - -static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd) -{ - int i, ret; - - for (i = 0; i < rtd->num_codecs; i++) { - struct snd_soc_dai *codec_dai = rtd->codec_dais[i]; - - ret = soc_register_ac97_codec(codec_dai->codec, codec_dai); - if (ret) { - while (--i >= 0) - soc_unregister_ac97_codec(codec_dai->codec); - return ret; - } - } - return 0; } -static void soc_unregister_ac97_dai_link(struct snd_soc_pcm_runtime *rtd) -{ - int i; - - for (i = 0; i < rtd->num_codecs; i++) - soc_unregister_ac97_codec(rtd->codec_dais[i]->codec); -} -#endif - static int soc_bind_aux_dev(struct snd_soc_card *card, int num) { struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num]; @@@ -1453,78 -1582,12 +1453,78 @@@ static int snd_soc_init_codec_cache(str return 0; } +/** + * snd_soc_runtime_set_dai_fmt() - Change DAI link format for a ASoC runtime + * @rtd: The runtime for which the DAI link format should be changed + * @dai_fmt: The new DAI link format + * + * This function updates the DAI link format for all DAIs connected to the DAI + * link for the specified runtime. + * + * Note: For setups with a static format set the dai_fmt field in the + * corresponding snd_dai_link struct instead of using this function. + * + * Returns 0 on success, otherwise a negative error code. + */ +int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, + unsigned int dai_fmt) +{ + struct snd_soc_dai **codec_dais = rtd->codec_dais; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + unsigned int i; + int ret; + + for (i = 0; i < rtd->num_codecs; i++) { + struct snd_soc_dai *codec_dai = codec_dais[i]; + + ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt); + if (ret != 0 && ret != -ENOTSUPP) { + dev_warn(codec_dai->dev, + "ASoC: Failed to set DAI format: %d\n", ret); + return ret; + } + } + + /* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */ + if (cpu_dai->codec) { + unsigned int inv_dai_fmt; + + inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK; + switch (dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBM_CFM: + inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; + break; + case SND_SOC_DAIFMT_CBM_CFS: + inv_dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; + break; + case SND_SOC_DAIFMT_CBS_CFM: + inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFS; + break; + case SND_SOC_DAIFMT_CBS_CFS: + inv_dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; + break; + } + + dai_fmt = inv_dai_fmt; + } + + ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt); + if (ret != 0 && ret != -ENOTSUPP) { + dev_warn(cpu_dai->dev, + "ASoC: Failed to set DAI format: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt); + static int snd_soc_instantiate_card(struct snd_soc_card *card) { struct snd_soc_codec *codec; - struct snd_soc_dai_link *dai_link; - int ret, i, order, dai_fmt; + int ret, i, order; + mutex_lock(&client_mutex); mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); /* bind DAIs */ @@@ -1634,9 -1697,60 +1634,9 @@@ card->num_dapm_routes); for (i = 0; i < card->num_links; i++) { - struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; - dai_link = &card->dai_link[i]; - dai_fmt = dai_link->dai_fmt; - - 
if (dai_fmt) { - struct snd_soc_dai **codec_dais = rtd->codec_dais; - int j; - - for (j = 0; j < rtd->num_codecs; j++) { - struct snd_soc_dai *codec_dai = codec_dais[j]; - - ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt); - if (ret != 0 && ret != -ENOTSUPP) - dev_warn(codec_dai->dev, - "ASoC: Failed to set DAI format: %d\n", - ret); - } - } - - /* If this is a regular CPU link there will be a platform */ - if (dai_fmt && - (dai_link->platform_name || dai_link->platform_of_node)) { - ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai, - dai_fmt); - if (ret != 0 && ret != -ENOTSUPP) - dev_warn(card->rtd[i].cpu_dai->dev, - "ASoC: Failed to set DAI format: %d\n", - ret); - } else if (dai_fmt) { - /* Flip the polarity for the "CPU" end */ - dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK; - switch (dai_link->dai_fmt & - SND_SOC_DAIFMT_MASTER_MASK) { - case SND_SOC_DAIFMT_CBM_CFM: - dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; - break; - case SND_SOC_DAIFMT_CBM_CFS: - dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; - break; - case SND_SOC_DAIFMT_CBS_CFM: - dai_fmt |= SND_SOC_DAIFMT_CBM_CFS; - break; - case SND_SOC_DAIFMT_CBS_CFS: - dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; - break; - } - - ret = snd_soc_dai_set_fmt(card->rtd[i].cpu_dai, - dai_fmt); - if (ret != 0 && ret != -ENOTSUPP) - dev_warn(card->rtd[i].cpu_dai->dev, - "ASoC: Failed to set DAI format: %d\n", - ret); - } + if (card->dai_link[i].dai_fmt) + snd_soc_runtime_set_dai_fmt(&card->rtd[i], + card->dai_link[i].dai_fmt); } snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname), @@@ -1667,6 -1781,9 +1667,6 @@@ } } - if (card->fully_routed) - snd_soc_dapm_auto_nc_pins(card); - snd_soc_dapm_new_widgets(card); ret = snd_card_register(card->snd_card); @@@ -1676,10 -1793,23 +1676,10 @@@ goto probe_aux_dev_err; } -#ifdef CONFIG_SND_SOC_AC97_BUS - /* register any AC97 codecs */ - for (i = 0; i < card->num_rtd; i++) { - ret = soc_register_ac97_dai_link(&card->rtd[i]); - if (ret < 0) { - dev_err(card->dev, - "ASoC: failed to register AC97: %d\n", ret); - while (--i >= 0) - soc_unregister_ac97_dai_link(&card->rtd[i]); - goto probe_aux_dev_err; - } - } -#endif - card->instantiated = 1; snd_soc_dapm_sync(&card->dapm); mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return 0; @@@ -1698,7 -1828,6 +1698,7 @@@ card_probe_error base_error: mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return ret; } @@@ -1812,6 -1941,7 +1812,6 @@@ EXPORT_SYMBOL_GPL(snd_soc_pm_ops) static struct platform_driver soc_driver = { .driver = { .name = "soc-audio", - .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, }, .probe = soc_probe, @@@ -1819,179 -1949,1403 +1819,179 @@@ }; /** - * snd_soc_new_ac97_codec - initailise AC97 device - * @codec: audio codec - * @ops: AC97 bus operations - * @num: AC97 codec number + * snd_soc_cnew - create new control + * @_template: control template + * @data: control private data + * @long_name: control long name + * @prefix: control name prefix + * + * Create a new mixer control from a template control. * - * Initialises AC97 codec resources for use by ad-hoc devices only. + * Returns 0 for success, else error. 
*/ -int snd_soc_new_ac97_codec(struct snd_soc_codec *codec, - struct snd_ac97_bus_ops *ops, int num) +struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template, + void *data, const char *long_name, + const char *prefix) { - codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL); - if (codec->ac97 == NULL) - return -ENOMEM; - - codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL); - if (codec->ac97->bus == NULL) { - kfree(codec->ac97); - codec->ac97 = NULL; - return -ENOMEM; - } - - codec->ac97->bus->ops = ops; - codec->ac97->num = num; - - /* - * Mark the AC97 device to be created by us. This way we ensure that the - * device will be registered with the device subsystem later on. - */ - codec->ac97_created = 1; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec); + struct snd_kcontrol_new template; + struct snd_kcontrol *kcontrol; + char *name = NULL; -static struct snd_ac97_reset_cfg snd_ac97_rst_cfg; + memcpy(&template, _template, sizeof(template)); + template.index = 0; -static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97) -{ - struct pinctrl *pctl = snd_ac97_rst_cfg.pctl; + if (!long_name) + long_name = template.name; - pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset); + if (prefix) { + name = kasprintf(GFP_KERNEL, "%s %s", prefix, long_name); + if (!name) + return NULL; - gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1); + template.name = name; + } else { + template.name = long_name; + } - udelay(10); + kcontrol = snd_ctl_new1(&template, data); - gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0); + kfree(name); - pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run); - msleep(2); + return kcontrol; } +EXPORT_SYMBOL_GPL(snd_soc_cnew); -static void snd_soc_ac97_reset(struct snd_ac97 *ac97) +static int snd_soc_add_controls(struct snd_card *card, struct device *dev, + const struct snd_kcontrol_new *controls, int num_controls, + const char *prefix, void *data) { - struct pinctrl *pctl = snd_ac97_rst_cfg.pctl; + int err, i; - pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset); + for (i = 0; i < num_controls; i++) { + const struct snd_kcontrol_new *control = &controls[i]; + err = snd_ctl_add(card, snd_soc_cnew(control, data, + control->name, prefix)); + if (err < 0) { + dev_err(dev, "ASoC: Failed to add %s: %d\n", + control->name, err); + return err; + } + } - gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0); - gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0); - gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0); + return 0; +} - udelay(10); +struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card, + const char *name) +{ + struct snd_card *card = soc_card->snd_card; + struct snd_kcontrol *kctl; - gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1); + if (unlikely(!name)) + return NULL; - pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run); - msleep(2); + list_for_each_entry(kctl, &card->controls, list) + if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) + return kctl; + return NULL; } +EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol); -static int snd_soc_ac97_parse_pinctl(struct device *dev, - struct snd_ac97_reset_cfg *cfg) +/** + * snd_soc_add_component_controls - Add an array of controls to a component. + * + * @component: Component to add controls to + * @controls: Array of controls to add + * @num_controls: Number of elements in the array + * + * Return: 0 for success, else error. 
+ */ +int snd_soc_add_component_controls(struct snd_soc_component *component, + const struct snd_kcontrol_new *controls, unsigned int num_controls) { - struct pinctrl *p; - struct pinctrl_state *state; - int gpio; - int ret; + struct snd_card *card = component->card->snd_card; - p = devm_pinctrl_get(dev); - if (IS_ERR(p)) { - dev_err(dev, "Failed to get pinctrl\n"); - return PTR_ERR(p); - } - cfg->pctl = p; - - state = pinctrl_lookup_state(p, "ac97-reset"); - if (IS_ERR(state)) { - dev_err(dev, "Can't find pinctrl state ac97-reset\n"); - return PTR_ERR(state); - } - cfg->pstate_reset = state; - - state = pinctrl_lookup_state(p, "ac97-warm-reset"); - if (IS_ERR(state)) { - dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n"); - return PTR_ERR(state); - } - cfg->pstate_warm_reset = state; - - state = pinctrl_lookup_state(p, "ac97-running"); - if (IS_ERR(state)) { - dev_err(dev, "Can't find pinctrl state ac97-running\n"); - return PTR_ERR(state); - } - cfg->pstate_run = state; - - gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0); - if (gpio < 0) { - dev_err(dev, "Can't find ac97-sync gpio\n"); - return gpio; - } - ret = devm_gpio_request(dev, gpio, "AC97 link sync"); - if (ret) { - dev_err(dev, "Failed requesting ac97-sync gpio\n"); - return ret; - } - cfg->gpio_sync = gpio; - - gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1); - if (gpio < 0) { - dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio); - return gpio; - } - ret = devm_gpio_request(dev, gpio, "AC97 link sdata"); - if (ret) { - dev_err(dev, "Failed requesting ac97-sdata gpio\n"); - return ret; - } - cfg->gpio_sdata = gpio; - - gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2); - if (gpio < 0) { - dev_err(dev, "Can't find ac97-reset gpio\n"); - return gpio; - } - ret = devm_gpio_request(dev, gpio, "AC97 link reset"); - if (ret) { - dev_err(dev, "Failed requesting ac97-reset gpio\n"); - return ret; - } - cfg->gpio_reset = gpio; - - return 0; -} - -struct snd_ac97_bus_ops *soc_ac97_ops; -EXPORT_SYMBOL_GPL(soc_ac97_ops); - -int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops) -{ - if (ops == soc_ac97_ops) - return 0; - - if (soc_ac97_ops && ops) - return -EBUSY; - - soc_ac97_ops = ops; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops); - -/** - * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions - * - * This function sets the reset and warm_reset properties of ops and parses - * the device node of pdev to get pinctrl states and gpio numbers to use. - */ -int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct snd_ac97_reset_cfg cfg; - int ret; - - ret = snd_soc_ac97_parse_pinctl(dev, &cfg); - if (ret) - return ret; - - ret = snd_soc_set_ac97_ops(ops); - if (ret) - return ret; - - ops->warm_reset = snd_soc_ac97_warm_reset; - ops->reset = snd_soc_ac97_reset; - - snd_ac97_rst_cfg = cfg; - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset); - -/** - * snd_soc_free_ac97_codec - free AC97 codec device - * @codec: audio codec - * - * Frees AC97 codec device resources. 
- */ -void snd_soc_free_ac97_codec(struct snd_soc_codec *codec) -{ -#ifdef CONFIG_SND_SOC_AC97_BUS - soc_unregister_ac97_codec(codec); -#endif - kfree(codec->ac97->bus); - kfree(codec->ac97); - codec->ac97 = NULL; - codec->ac97_created = 0; -} -EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec); - -/** - * snd_soc_cnew - create new control - * @_template: control template - * @data: control private data - * @long_name: control long name - * @prefix: control name prefix - * - * Create a new mixer control from a template control. - * - * Returns 0 for success, else error. - */ -struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template, - void *data, const char *long_name, - const char *prefix) -{ - struct snd_kcontrol_new template; - struct snd_kcontrol *kcontrol; - char *name = NULL; - - memcpy(&template, _template, sizeof(template)); - template.index = 0; - - if (!long_name) - long_name = template.name; - - if (prefix) { - name = kasprintf(GFP_KERNEL, "%s %s", prefix, long_name); - if (!name) - return NULL; - - template.name = name; - } else { - template.name = long_name; - } - - kcontrol = snd_ctl_new1(&template, data); - - kfree(name); - - return kcontrol; -} -EXPORT_SYMBOL_GPL(snd_soc_cnew); - -static int snd_soc_add_controls(struct snd_card *card, struct device *dev, - const struct snd_kcontrol_new *controls, int num_controls, - const char *prefix, void *data) -{ - int err, i; - - for (i = 0; i < num_controls; i++) { - const struct snd_kcontrol_new *control = &controls[i]; - err = snd_ctl_add(card, snd_soc_cnew(control, data, - control->name, prefix)); - if (err < 0) { - dev_err(dev, "ASoC: Failed to add %s: %d\n", - control->name, err); - return err; - } - } - - return 0; -} - -struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card, - const char *name) -{ - struct snd_card *card = soc_card->snd_card; - struct snd_kcontrol *kctl; - - if (unlikely(!name)) - return NULL; - - list_for_each_entry(kctl, &card->controls, list) - if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) - return kctl; - return NULL; -} -EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol); - -/** - * snd_soc_add_component_controls - Add an array of controls to a component. - * - * @component: Component to add controls to - * @controls: Array of controls to add - * @num_controls: Number of elements in the array - * - * Return: 0 for success, else error. - */ -int snd_soc_add_component_controls(struct snd_soc_component *component, - const struct snd_kcontrol_new *controls, unsigned int num_controls) -{ - struct snd_card *card = component->card->snd_card; - - return snd_soc_add_controls(card, component->dev, controls, - num_controls, component->name_prefix, component); -} -EXPORT_SYMBOL_GPL(snd_soc_add_component_controls); - -/** - * snd_soc_add_codec_controls - add an array of controls to a codec. - * Convenience function to add a list of controls. Many codecs were - * duplicating this code. - * - * @codec: codec to add controls to - * @controls: array of controls to add - * @num_controls: number of elements in the array - * - * Return 0 for success, else error. - */ -int snd_soc_add_codec_controls(struct snd_soc_codec *codec, - const struct snd_kcontrol_new *controls, unsigned int num_controls) -{ - return snd_soc_add_component_controls(&codec->component, controls, - num_controls); -} -EXPORT_SYMBOL_GPL(snd_soc_add_codec_controls); - -/** - * snd_soc_add_platform_controls - add an array of controls to a platform. - * Convenience function to add a list of controls. 
- * - * @platform: platform to add controls to - * @controls: array of controls to add - * @num_controls: number of elements in the array - * - * Return 0 for success, else error. - */ -int snd_soc_add_platform_controls(struct snd_soc_platform *platform, - const struct snd_kcontrol_new *controls, unsigned int num_controls) -{ - return snd_soc_add_component_controls(&platform->component, controls, - num_controls); -} -EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls); - -/** - * snd_soc_add_card_controls - add an array of controls to a SoC card. - * Convenience function to add a list of controls. - * - * @soc_card: SoC card to add controls to - * @controls: array of controls to add - * @num_controls: number of elements in the array - * - * Return 0 for success, else error. - */ -int snd_soc_add_card_controls(struct snd_soc_card *soc_card, - const struct snd_kcontrol_new *controls, int num_controls) -{ - struct snd_card *card = soc_card->snd_card; - - return snd_soc_add_controls(card, soc_card->dev, controls, num_controls, - NULL, soc_card); -} -EXPORT_SYMBOL_GPL(snd_soc_add_card_controls); - -/** - * snd_soc_add_dai_controls - add an array of controls to a DAI. - * Convienience function to add a list of controls. - * - * @dai: DAI to add controls to - * @controls: array of controls to add - * @num_controls: number of elements in the array - * - * Return 0 for success, else error. - */ -int snd_soc_add_dai_controls(struct snd_soc_dai *dai, - const struct snd_kcontrol_new *controls, int num_controls) -{ - struct snd_card *card = dai->card->snd_card; - - return snd_soc_add_controls(card, dai->dev, controls, num_controls, - NULL, dai); -} -EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls); - -/** - * snd_soc_info_enum_double - enumerated double mixer info callback - * @kcontrol: mixer control - * @uinfo: control element information - * - * Callback to provide information about a double enumerated - * mixer control. - * - * Returns 0 for success. - */ -int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; - uinfo->count = e->shift_l == e->shift_r ? 1 : 2; - uinfo->value.enumerated.items = e->items; - - if (uinfo->value.enumerated.item >= e->items) - uinfo->value.enumerated.item = e->items - 1; - strlcpy(uinfo->value.enumerated.name, - e->texts[uinfo->value.enumerated.item], - sizeof(uinfo->value.enumerated.name)); - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_info_enum_double); - -/** - * snd_soc_get_enum_double - enumerated double mixer get callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to get the value of a double enumerated mixer. - * - * Returns 0 for success. 
- */ -int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - unsigned int val, item; - unsigned int reg_val; - int ret; - - ret = snd_soc_component_read(component, e->reg, ®_val); - if (ret) - return ret; - val = (reg_val >> e->shift_l) & e->mask; - item = snd_soc_enum_val_to_item(e, val); - ucontrol->value.enumerated.item[0] = item; - if (e->shift_l != e->shift_r) { - val = (reg_val >> e->shift_l) & e->mask; - item = snd_soc_enum_val_to_item(e, val); - ucontrol->value.enumerated.item[1] = item; - } - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_get_enum_double); - -/** - * snd_soc_put_enum_double - enumerated double mixer put callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to set the value of a double enumerated mixer. - * - * Returns 0 for success. - */ -int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - unsigned int *item = ucontrol->value.enumerated.item; - unsigned int val; - unsigned int mask; - - if (item[0] >= e->items) - return -EINVAL; - val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l; - mask = e->mask << e->shift_l; - if (e->shift_l != e->shift_r) { - if (item[1] >= e->items) - return -EINVAL; - val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; - mask |= e->mask << e->shift_r; - } - - return snd_soc_component_update_bits(component, e->reg, mask, val); -} -EXPORT_SYMBOL_GPL(snd_soc_put_enum_double); - -/** - * snd_soc_read_signed - Read a codec register and interprete as signed value - * @component: component - * @reg: Register to read - * @mask: Mask to use after shifting the register value - * @shift: Right shift of register value - * @sign_bit: Bit that describes if a number is negative or not. - * @signed_val: Pointer to where the read value should be stored - * - * This functions reads a codec register. The register value is shifted right - * by 'shift' bits and masked with the given 'mask'. Afterwards it translates - * the given registervalue into a signed integer if sign_bit is non-zero. - * - * Returns 0 on sucess, otherwise an error value - */ -static int snd_soc_read_signed(struct snd_soc_component *component, - unsigned int reg, unsigned int mask, unsigned int shift, - unsigned int sign_bit, int *signed_val) -{ - int ret; - unsigned int val; - - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; - - val = (val >> shift) & mask; - - if (!sign_bit) { - *signed_val = val; - return 0; - } - - /* non-negative number */ - if (!(val & BIT(sign_bit))) { - *signed_val = val; - return 0; - } - - ret = val; - - /* - * The register most probably does not contain a full-sized int. - * Instead we have an arbitrary number of bits in a signed - * representation which has to be translated into a full-sized int. - * This is done by filling up all bits above the sign-bit. - */ - ret |= ~((int)(BIT(sign_bit) - 1)); - - *signed_val = ret; - - return 0; -} - -/** - * snd_soc_info_volsw - single mixer info callback - * @kcontrol: mixer control - * @uinfo: control element information - * - * Callback to provide information about a single mixer control, or a double - * mixer control that spans 2 registers. 
- * - * Returns 0 for success. - */ -int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; - - if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume")) - uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; - else - uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - - uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; - uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - mc->min; - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_info_volsw); - -/** - * snd_soc_get_volsw - single mixer get callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to get the value of a single mixer control, or a double mixer - * control that spans 2 registers. - * - * Returns 0 for success. - */ -int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int reg2 = mc->rreg; - unsigned int shift = mc->shift; - unsigned int rshift = mc->rshift; - int max = mc->max; - int min = mc->min; - int sign_bit = mc->sign_bit; - unsigned int mask = (1 << fls(max)) - 1; - unsigned int invert = mc->invert; - int val; - int ret; - - if (sign_bit) - mask = BIT(sign_bit + 1) - 1; - - ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val); - if (ret) - return ret; - - ucontrol->value.integer.value[0] = val - min; - if (invert) - ucontrol->value.integer.value[0] = - max - ucontrol->value.integer.value[0]; - - if (snd_soc_volsw_is_stereo(mc)) { - if (reg == reg2) - ret = snd_soc_read_signed(component, reg, mask, rshift, - sign_bit, &val); - else - ret = snd_soc_read_signed(component, reg2, mask, shift, - sign_bit, &val); - if (ret) - return ret; - - ucontrol->value.integer.value[1] = val - min; - if (invert) - ucontrol->value.integer.value[1] = - max - ucontrol->value.integer.value[1]; - } - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_get_volsw); - -/** - * snd_soc_put_volsw - single mixer put callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to set the value of a single mixer control, or a double mixer - * control that spans 2 registers. - * - * Returns 0 for success. 
- */ -int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int reg2 = mc->rreg; - unsigned int shift = mc->shift; - unsigned int rshift = mc->rshift; - int max = mc->max; - int min = mc->min; - unsigned int sign_bit = mc->sign_bit; - unsigned int mask = (1 << fls(max)) - 1; - unsigned int invert = mc->invert; - int err; - bool type_2r = false; - unsigned int val2 = 0; - unsigned int val, val_mask; - - if (sign_bit) - mask = BIT(sign_bit + 1) - 1; - - val = ((ucontrol->value.integer.value[0] + min) & mask); - if (invert) - val = max - val; - val_mask = mask << shift; - val = val << shift; - if (snd_soc_volsw_is_stereo(mc)) { - val2 = ((ucontrol->value.integer.value[1] + min) & mask); - if (invert) - val2 = max - val2; - if (reg == reg2) { - val_mask |= mask << rshift; - val |= val2 << rshift; - } else { - val2 = val2 << shift; - type_2r = true; - } - } - err = snd_soc_component_update_bits(component, reg, val_mask, val); - if (err < 0) - return err; - - if (type_2r) - err = snd_soc_component_update_bits(component, reg2, val_mask, - val2); - - return err; -} -EXPORT_SYMBOL_GPL(snd_soc_put_volsw); - -/** - * snd_soc_get_volsw_sx - single mixer get callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to get the value of a single mixer control, or a double mixer - * control that spans 2 registers. - * - * Returns 0 for success. - */ -int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int reg2 = mc->rreg; - unsigned int shift = mc->shift; - unsigned int rshift = mc->rshift; - int max = mc->max; - int min = mc->min; - int mask = (1 << (fls(min + max) - 1)) - 1; - unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret < 0) - return ret; - - ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask; - - if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, reg2, &val); - if (ret < 0) - return ret; - - val = ((val >> rshift) - min) & mask; - ucontrol->value.integer.value[1] = val; - } - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx); - -/** - * snd_soc_put_volsw_sx - double mixer set callback - * @kcontrol: mixer control - * @uinfo: control element information - * - * Callback to set the value of a double mixer control that spans 2 registers. - * - * Returns 0 for success. 
- */ -int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - - unsigned int reg = mc->reg; - unsigned int reg2 = mc->rreg; - unsigned int shift = mc->shift; - unsigned int rshift = mc->rshift; - int max = mc->max; - int min = mc->min; - int mask = (1 << (fls(min + max) - 1)) - 1; - int err = 0; - unsigned int val, val_mask, val2 = 0; - - val_mask = mask << shift; - val = (ucontrol->value.integer.value[0] + min) & mask; - val = val << shift; - - err = snd_soc_component_update_bits(component, reg, val_mask, val); - if (err < 0) - return err; - - if (snd_soc_volsw_is_stereo(mc)) { - val_mask = mask << rshift; - val2 = (ucontrol->value.integer.value[1] + min) & mask; - val2 = val2 << rshift; - - err = snd_soc_component_update_bits(component, reg2, val_mask, - val2); - } - return err; -} -EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx); - -/** - * snd_soc_info_volsw_s8 - signed mixer info callback - * @kcontrol: mixer control - * @uinfo: control element information - * - * Callback to provide information about a signed mixer control. - * - * Returns 0 for success. - */ -int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - int min = mc->min; - - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - uinfo->count = 2; - uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - min; - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8); - -/** - * snd_soc_get_volsw_s8 - signed mixer get callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to get the value of a signed mixer control. - * - * Returns 0 for success. - */ -int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - unsigned int reg = mc->reg; - unsigned int val; - int min = mc->min; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; - - ucontrol->value.integer.value[0] = - ((signed char)(val & 0xff))-min; - ucontrol->value.integer.value[1] = - ((signed char)((val >> 8) & 0xff))-min; - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8); - -/** - * snd_soc_put_volsw_sgn - signed mixer put callback - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to set the value of a signed mixer control. - * - * Returns 0 for success. - */ -int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - unsigned int reg = mc->reg; - int min = mc->min; - unsigned int val; - - val = (ucontrol->value.integer.value[0]+min) & 0xff; - val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8; - - return snd_soc_component_update_bits(component, reg, 0xffff, val); -} -EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8); - -/** - * snd_soc_info_volsw_range - single mixer info callback with range. 
- * @kcontrol: mixer control - * @uinfo: control element information - * - * Callback to provide information, within a range, about a single - * mixer control. - * - * returns 0 for success. - */ -int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - int min = mc->min; - - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; - uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - min; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range); - -/** - * snd_soc_put_volsw_range - single mixer put value callback with range. - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to set the value, within a range, for a single mixer control. - * - * Returns 0 for success. - */ -int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - unsigned int reg = mc->reg; - unsigned int rreg = mc->rreg; - unsigned int shift = mc->shift; - int min = mc->min; - int max = mc->max; - unsigned int mask = (1 << fls(max)) - 1; - unsigned int invert = mc->invert; - unsigned int val, val_mask; - int ret; - - if (invert) - val = (max - ucontrol->value.integer.value[0]) & mask; - else - val = ((ucontrol->value.integer.value[0] + min) & mask); - val_mask = mask << shift; - val = val << shift; - - ret = snd_soc_component_update_bits(component, reg, val_mask, val); - if (ret < 0) - return ret; - - if (snd_soc_volsw_is_stereo(mc)) { - if (invert) - val = (max - ucontrol->value.integer.value[1]) & mask; - else - val = ((ucontrol->value.integer.value[1] + min) & mask); - val_mask = mask << shift; - val = val << shift; - - ret = snd_soc_component_update_bits(component, rreg, val_mask, - val); - } - - return ret; -} -EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range); - -/** - * snd_soc_get_volsw_range - single mixer get callback with range - * @kcontrol: mixer control - * @ucontrol: control element information - * - * Callback to get the value, within a range, of a single mixer control. - * - * Returns 0 for success. 
- */ -int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int rreg = mc->rreg; - unsigned int shift = mc->shift; - int min = mc->min; - int max = mc->max; - unsigned int mask = (1 << fls(max)) - 1; - unsigned int invert = mc->invert; - unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; - - ucontrol->value.integer.value[0] = (val >> shift) & mask; - if (invert) - ucontrol->value.integer.value[0] = - max - ucontrol->value.integer.value[0]; - else - ucontrol->value.integer.value[0] = - ucontrol->value.integer.value[0] - min; - - if (snd_soc_volsw_is_stereo(mc)) { - ret = snd_soc_component_read(component, rreg, &val); - if (ret) - return ret; - - ucontrol->value.integer.value[1] = (val >> shift) & mask; - if (invert) - ucontrol->value.integer.value[1] = - max - ucontrol->value.integer.value[1]; - else - ucontrol->value.integer.value[1] = - ucontrol->value.integer.value[1] - min; - } - - return 0; + return snd_soc_add_controls(card, component->dev, controls, + num_controls, component->name_prefix, component); } -EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range); +EXPORT_SYMBOL_GPL(snd_soc_add_component_controls); /** - * snd_soc_limit_volume - Set new limit to an existing volume control. + * snd_soc_add_codec_controls - add an array of controls to a codec. + * Convenience function to add a list of controls. Many codecs were + * duplicating this code. * - * @codec: where to look for the control - * @name: Name of the control - * @max: new maximum limit + * @codec: codec to add controls to + * @controls: array of controls to add + * @num_controls: number of elements in the array * * Return 0 for success, else error. 
*/ -int snd_soc_limit_volume(struct snd_soc_codec *codec, - const char *name, int max) -{ - struct snd_card *card = codec->component.card->snd_card; - struct snd_kcontrol *kctl; - struct soc_mixer_control *mc; - int found = 0; - int ret = -EINVAL; - - /* Sanity check for name and max */ - if (unlikely(!name || max <= 0)) - return -EINVAL; - - list_for_each_entry(kctl, &card->controls, list) { - if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) { - found = 1; - break; - } - } - if (found) { - mc = (struct soc_mixer_control *)kctl->private_value; - if (max <= mc->max) { - mc->platform_max = max; - ret = 0; - } - } - return ret; -} -EXPORT_SYMBOL_GPL(snd_soc_limit_volume); - -int snd_soc_bytes_info(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_bytes *params = (void *)kcontrol->private_value; - - uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; - uinfo->count = params->num_regs * component->val_bytes; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_bytes_info); - -int snd_soc_bytes_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_bytes *params = (void *)kcontrol->private_value; - int ret; - - if (component->regmap) - ret = regmap_raw_read(component->regmap, params->base, - ucontrol->value.bytes.data, - params->num_regs * component->val_bytes); - else - ret = -EINVAL; - - /* Hide any masked bytes to ensure consistent data reporting */ - if (ret == 0 && params->mask) { - switch (component->val_bytes) { - case 1: - ucontrol->value.bytes.data[0] &= ~params->mask; - break; - case 2: - ((u16 *)(&ucontrol->value.bytes.data))[0] - &= cpu_to_be16(~params->mask); - break; - case 4: - ((u32 *)(&ucontrol->value.bytes.data))[0] - &= cpu_to_be32(~params->mask); - break; - default: - return -EINVAL; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(snd_soc_bytes_get); - -int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_bytes *params = (void *)kcontrol->private_value; - int ret, len; - unsigned int val, mask; - void *data; - - if (!component->regmap || !params->num_regs) - return -EINVAL; - - len = params->num_regs * component->val_bytes; - - data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA); - if (!data) - return -ENOMEM; - - /* - * If we've got a mask then we need to preserve the register - * bits. We shouldn't modify the incoming data so take a - * copy. 
- */ - if (params->mask) { - ret = regmap_read(component->regmap, params->base, &val); - if (ret != 0) - goto out; - - val &= params->mask; - - switch (component->val_bytes) { - case 1: - ((u8 *)data)[0] &= ~params->mask; - ((u8 *)data)[0] |= val; - break; - case 2: - mask = ~params->mask; - ret = regmap_parse_val(component->regmap, - &mask, &mask); - if (ret != 0) - goto out; - - ((u16 *)data)[0] &= mask; - - ret = regmap_parse_val(component->regmap, - &val, &val); - if (ret != 0) - goto out; - - ((u16 *)data)[0] |= val; - break; - case 4: - mask = ~params->mask; - ret = regmap_parse_val(component->regmap, - &mask, &mask); - if (ret != 0) - goto out; - - ((u32 *)data)[0] &= mask; - - ret = regmap_parse_val(component->regmap, - &val, &val); - if (ret != 0) - goto out; - - ((u32 *)data)[0] |= val; - break; - default: - ret = -EINVAL; - goto out; - } - } - - ret = regmap_raw_write(component->regmap, params->base, - data, len); - -out: - kfree(data); - - return ret; -} -EXPORT_SYMBOL_GPL(snd_soc_bytes_put); - -int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *ucontrol) -{ - struct soc_bytes_ext *params = (void *)kcontrol->private_value; - - ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES; - ucontrol->count = params->max; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext); - -int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag, - unsigned int size, unsigned int __user *tlv) -{ - struct soc_bytes_ext *params = (void *)kcontrol->private_value; - unsigned int count = size < params->max ? size : params->max; - int ret = -ENXIO; - - switch (op_flag) { - case SNDRV_CTL_TLV_OP_READ: - if (params->get) - ret = params->get(tlv, count); - break; - case SNDRV_CTL_TLV_OP_WRITE: - if (params->put) - ret = params->put(tlv, count); - break; - } - return ret; -} -EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback); - -/** - * snd_soc_info_xr_sx - signed multi register info callback - * @kcontrol: mreg control - * @uinfo: control element information - * - * Callback to provide information of a control that can - * span multiple codec registers which together - * forms a single signed value in a MSB/LSB manner. - * - * Returns 0 for success. - */ -int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo) -{ - struct soc_mreg_control *mc = - (struct soc_mreg_control *)kcontrol->private_value; - uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; - uinfo->count = 1; - uinfo->value.integer.min = mc->min; - uinfo->value.integer.max = mc->max; - - return 0; -} -EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx); - -/** - * snd_soc_get_xr_sx - signed multi register get callback - * @kcontrol: mreg control - * @ucontrol: control element information - * - * Callback to get the value of a control that can span - * multiple codec registers which together forms a single - * signed value in a MSB/LSB manner. The control supports - * specifying total no of bits used to allow for bitfields - * across the multiple codec registers. - * - * Returns 0 for success. 
- */ -int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +int snd_soc_add_codec_controls(struct snd_soc_codec *codec, + const struct snd_kcontrol_new *controls, unsigned int num_controls) { - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mreg_control *mc = - (struct soc_mreg_control *)kcontrol->private_value; - unsigned int regbase = mc->regbase; - unsigned int regcount = mc->regcount; - unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<invert; - unsigned long mask = (1UL<nbits)-1; - long min = mc->min; - long max = mc->max; - long val = 0; - unsigned int regval; - unsigned int i; - int ret; - - for (i = 0; i < regcount; i++) { - ret = snd_soc_component_read(component, regbase+i, ®val); - if (ret) - return ret; - val |= (regval & regwmask) << (regwshift*(regcount-i-1)); - } - val &= mask; - if (min < 0 && val > max) - val |= ~mask; - if (invert) - val = max - val; - ucontrol->value.integer.value[0] = val; - - return 0; + return snd_soc_add_component_controls(&codec->component, controls, + num_controls); } -EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx); +EXPORT_SYMBOL_GPL(snd_soc_add_codec_controls); /** - * snd_soc_put_xr_sx - signed multi register get callback - * @kcontrol: mreg control - * @ucontrol: control element information + * snd_soc_add_platform_controls - add an array of controls to a platform. + * Convenience function to add a list of controls. * - * Callback to set the value of a control that can span - * multiple codec registers which together forms a single - * signed value in a MSB/LSB manner. The control supports - * specifying total no of bits used to allow for bitfields - * across the multiple codec registers. + * @platform: platform to add controls to + * @controls: array of controls to add + * @num_controls: number of elements in the array * - * Returns 0 for success. + * Return 0 for success, else error. */ -int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +int snd_soc_add_platform_controls(struct snd_soc_platform *platform, + const struct snd_kcontrol_new *controls, unsigned int num_controls) { - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mreg_control *mc = - (struct soc_mreg_control *)kcontrol->private_value; - unsigned int regbase = mc->regbase; - unsigned int regcount = mc->regcount; - unsigned int regwshift = component->val_bytes * BITS_PER_BYTE; - unsigned int regwmask = (1<invert; - unsigned long mask = (1UL<nbits)-1; - long max = mc->max; - long val = ucontrol->value.integer.value[0]; - unsigned int i, regval, regmask; - int err; - - if (invert) - val = max - val; - val &= mask; - for (i = 0; i < regcount; i++) { - regval = (val >> (regwshift*(regcount-i-1))) & regwmask; - regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask; - err = snd_soc_component_update_bits(component, regbase+i, - regmask, regval); - if (err < 0) - return err; - } - - return 0; + return snd_soc_add_component_controls(&platform->component, controls, + num_controls); } -EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx); +EXPORT_SYMBOL_GPL(snd_soc_add_platform_controls); /** - * snd_soc_get_strobe - strobe get callback - * @kcontrol: mixer control - * @ucontrol: control element information + * snd_soc_add_card_controls - add an array of controls to a SoC card. + * Convenience function to add a list of controls. * - * Callback get the value of a strobe mixer control. 
+ * @soc_card: SoC card to add controls to + * @controls: array of controls to add + * @num_controls: number of elements in the array * - * Returns 0 for success. + * Return 0 for success, else error. */ -int snd_soc_get_strobe(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +int snd_soc_add_card_controls(struct snd_soc_card *soc_card, + const struct snd_kcontrol_new *controls, int num_controls) { - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int shift = mc->shift; - unsigned int mask = 1 << shift; - unsigned int invert = mc->invert != 0; - unsigned int val; - int ret; - - ret = snd_soc_component_read(component, reg, &val); - if (ret) - return ret; - - val &= mask; - - if (shift != 0 && val != 0) - val = val >> shift; - ucontrol->value.enumerated.item[0] = val ^ invert; + struct snd_card *card = soc_card->snd_card; - return 0; + return snd_soc_add_controls(card, soc_card->dev, controls, num_controls, + NULL, soc_card); } -EXPORT_SYMBOL_GPL(snd_soc_get_strobe); +EXPORT_SYMBOL_GPL(snd_soc_add_card_controls); /** - * snd_soc_put_strobe - strobe put callback - * @kcontrol: mixer control - * @ucontrol: control element information + * snd_soc_add_dai_controls - add an array of controls to a DAI. + * Convienience function to add a list of controls. * - * Callback strobe a register bit to high then low (or the inverse) - * in one pass of a single mixer enum control. + * @dai: DAI to add controls to + * @controls: array of controls to add + * @num_controls: number of elements in the array * - * Returns 1 for success. + * Return 0 for success, else error. */ -int snd_soc_put_strobe(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) +int snd_soc_add_dai_controls(struct snd_soc_dai *dai, + const struct snd_kcontrol_new *controls, int num_controls) { - struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct soc_mixer_control *mc = - (struct soc_mixer_control *)kcontrol->private_value; - unsigned int reg = mc->reg; - unsigned int shift = mc->shift; - unsigned int mask = 1 << shift; - unsigned int invert = mc->invert != 0; - unsigned int strobe = ucontrol->value.enumerated.item[0] != 0; - unsigned int val1 = (strobe ^ invert) ? mask : 0; - unsigned int val2 = (strobe ^ invert) ? 0 : mask; - int err; - - err = snd_soc_component_update_bits(component, reg, mask, val1); - if (err < 0) - return err; + struct snd_card *card = dai->component->card->snd_card; - return snd_soc_component_update_bits(component, reg, mask, val2); + return snd_soc_add_controls(card, dai->dev, controls, num_controls, + NULL, dai); } -EXPORT_SYMBOL_GPL(snd_soc_put_strobe); +EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls); /** * snd_soc_dai_set_sysclk - configure DAI system or master clock. @@@ -2159,27 -3513,15 +2159,27 @@@ static int snd_soc_xlate_tdm_slot_mask( } /** - * snd_soc_dai_set_tdm_slot - configure DAI TDM. - * @dai: DAI + * snd_soc_dai_set_tdm_slot() - Configures a DAI for TDM operation + * @dai: The DAI to configure * @tx_mask: bitmask representing active TX slots. * @rx_mask: bitmask representing active RX slots. * @slots: Number of slots in use. * @slot_width: Width in bits for each slot. * - * Configures a DAI for TDM operation. Both mask and slots are codec and DAI - * specific. + * This function configures the specified DAI for TDM operation. 
@slot contains + * the total number of slots of the TDM stream and @slot_with the width of each + * slot in bit clock cycles. @tx_mask and @rx_mask are bitmasks specifying the + * active slots of the TDM stream for the specified DAI, i.e. which slots the + * DAI should write to or read from. If a bit is set the corresponding slot is + * active, if a bit is cleared the corresponding slot is inactive. Bit 0 maps to + * the first slot, bit 1 to the second slot and so on. The first active slot + * maps to the first channel of the DAI, the second active slot to the second + * channel and so on. + * + * TDM mode can be disabled by passing 0 for @slots. In this case @tx_mask, + * @rx_mask and @slot_width will be ignored. + * + * Returns 0 on success, a negative error code otherwise. */ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) @@@ -2438,8 -3780,8 +2438,8 @@@ int snd_soc_unregister_card(struct snd_ card->instantiated = false; snd_soc_dapm_shutdown(card); soc_cleanup_card_resources(card); + dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name); } - dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name); return 0; } @@@ -2654,62 -3996,22 +2654,62 @@@ static int snd_soc_component_initialize return 0; } -static void snd_soc_component_init_regmap(struct snd_soc_component *component) +static void snd_soc_component_setup_regmap(struct snd_soc_component *component) { - if (!component->regmap) - component->regmap = dev_get_regmap(component->dev, NULL); - if (component->regmap) { - int val_bytes = regmap_get_val_bytes(component->regmap); - /* Errors are legitimate for non-integer byte multiples */ - if (val_bytes > 0) - component->val_bytes = val_bytes; - } + int val_bytes = regmap_get_val_bytes(component->regmap); + + /* Errors are legitimate for non-integer byte multiples */ + if (val_bytes > 0) + component->val_bytes = val_bytes; +} + +#ifdef CONFIG_REGMAP + +/** + * snd_soc_component_init_regmap() - Initialize regmap instance for the component + * @component: The component for which to initialize the regmap instance + * @regmap: The regmap instance that should be used by the component + * + * This function allows deferred assignment of the regmap instance that is + * associated with the component. Only use this if the regmap instance is not + * yet ready when the component is registered. The function must also be called + * before the first IO attempt of the component. + */ +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap) +{ + component->regmap = regmap; + snd_soc_component_setup_regmap(component); } +EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); + +/** + * snd_soc_component_exit_regmap() - De-initialize regmap instance for the component + * @component: The component for which to de-initialize the regmap instance + * + * Calls regmap_exit() on the regmap instance associated to the component and + * removes the regmap instance from the component. + * + * This function should only be used if snd_soc_component_init_regmap() was used + * to initialize the regmap instance. 
+ */ +void snd_soc_component_exit_regmap(struct snd_soc_component *component) +{ + regmap_exit(component->regmap); + component->regmap = NULL; +} +EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); + +#endif static void snd_soc_component_add_unlocked(struct snd_soc_component *component) { - if (!component->write && !component->read) - snd_soc_component_init_regmap(component); + if (!component->write && !component->read) { + if (!component->regmap) + component->regmap = dev_get_regmap(component->dev, NULL); + if (component->regmap) + snd_soc_component_setup_regmap(component); + } list_add(&component->list, &component_list); } @@@ -2732,6 -4034,13 +2732,6 @@@ static void snd_soc_component_del_unloc list_del(&component->list); } -static void snd_soc_component_del(struct snd_soc_component *component) -{ - mutex_lock(&client_mutex); - snd_soc_component_del_unlocked(component); - mutex_unlock(&client_mutex); -} - int snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, @@@ -2755,7 -4064,7 +2755,7 @@@ ret = snd_soc_register_dais(cmpnt, dai_drv, num_dai, true); if (ret < 0) { - dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret); + dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret); goto err_cleanup; } @@@ -2779,17 -4088,14 +2779,17 @@@ void snd_soc_unregister_component(struc { struct snd_soc_component *cmpnt; + mutex_lock(&client_mutex); list_for_each_entry(cmpnt, &component_list, list) { if (dev == cmpnt->dev && cmpnt->registered_as_component) goto found; } + mutex_unlock(&client_mutex); return; found: - snd_soc_component_del(cmpnt); + snd_soc_component_del_unlocked(cmpnt); + mutex_unlock(&client_mutex); snd_soc_component_cleanup(cmpnt); kfree(cmpnt); } @@@ -2897,14 -4203,10 +2897,14 @@@ struct snd_soc_platform *snd_soc_lookup { struct snd_soc_platform *platform; + mutex_lock(&client_mutex); list_for_each_entry(platform, &platform_list, list) { - if (dev == platform->dev) + if (dev == platform->dev) { + mutex_unlock(&client_mutex); return platform; + } } + mutex_unlock(&client_mutex); return NULL; } @@@ -3060,6 -4362,7 +3060,6 @@@ int snd_soc_register_codec(struct devic codec->dev = dev; codec->driver = codec_drv; codec->component.val_bytes = codec_drv->reg_word_size; - mutex_init(&codec->mutex); #ifdef CONFIG_DEBUG_FS codec->component.init_debugfs = soc_init_codec_debugfs; @@@ -3076,7 -4379,7 +3076,7 @@@ ret = snd_soc_register_dais(&codec->component, dai_drv, num_dai, false); if (ret < 0) { - dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret); + dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret); goto err_cleanup; } @@@ -3109,15 -4412,15 +3109,15 @@@ void snd_soc_unregister_codec(struct de { struct snd_soc_codec *codec; + mutex_lock(&client_mutex); list_for_each_entry(codec, &codec_list, list) { if (dev == codec->dev) goto found; } + mutex_unlock(&client_mutex); return; found: - - mutex_lock(&client_mutex); list_del(&codec->list); snd_soc_component_del_unlocked(&codec->component); mutex_unlock(&client_mutex); @@@ -3447,30 -4750,36 +3447,30 @@@ unsigned int snd_soc_of_parse_daifmt(st } EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt); -int snd_soc_of_get_dai_name(struct device_node *of_node, - const char **dai_name) +static int snd_soc_get_dai_name(struct of_phandle_args *args, + const char **dai_name) { struct snd_soc_component *pos; - struct of_phandle_args args; - int ret; - - ret = of_parse_phandle_with_args(of_node, "sound-dai", - "#sound-dai-cells", 0, &args); - if (ret) - return ret; - - ret = 
-EPROBE_DEFER; + int ret = -EPROBE_DEFER; mutex_lock(&client_mutex); list_for_each_entry(pos, &component_list, list) { - if (pos->dev->of_node != args.np) + if (pos->dev->of_node != args->np) continue; if (pos->driver->of_xlate_dai_name) { - ret = pos->driver->of_xlate_dai_name(pos, &args, dai_name); + ret = pos->driver->of_xlate_dai_name(pos, + args, + dai_name); } else { int id = -1; - switch (args.args_count) { + switch (args->args_count) { case 0: id = 0; /* same as dai_drv[0] */ break; case 1: - id = args.args[0]; + id = args->args[0]; break; default: /* not supported */ @@@ -3492,21 -4801,6 +3492,21 @@@ break; } mutex_unlock(&client_mutex); + return ret; +} + +int snd_soc_of_get_dai_name(struct device_node *of_node, + const char **dai_name) +{ + struct of_phandle_args args; + int ret; + + ret = of_parse_phandle_with_args(of_node, "sound-dai", + "#sound-dai-cells", 0, &args); + if (ret) + return ret; + + ret = snd_soc_get_dai_name(&args, dai_name); of_node_put(args.np); @@@ -3514,77 -4808,6 +3514,77 @@@ } EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name); +/* + * snd_soc_of_get_dai_link_codecs - Parse a list of CODECs in the devicetree + * @dev: Card device + * @of_node: Device node + * @dai_link: DAI link + * + * Builds an array of CODEC DAI components from the DAI link property + * 'sound-dai'. + * The array is set in the DAI link and the number of DAIs is set accordingly. + * The device nodes in the array (of_node) must be dereferenced by the caller. + * + * Returns 0 for success + */ +int snd_soc_of_get_dai_link_codecs(struct device *dev, + struct device_node *of_node, + struct snd_soc_dai_link *dai_link) +{ + struct of_phandle_args args; + struct snd_soc_dai_link_component *component; + char *name; + int index, num_codecs, ret; + + /* Count the number of CODECs */ + name = "sound-dai"; + num_codecs = of_count_phandle_with_args(of_node, name, + "#sound-dai-cells"); + if (num_codecs <= 0) { + if (num_codecs == -ENOENT) + dev_err(dev, "No 'sound-dai' property\n"); + else + dev_err(dev, "Bad phandle in 'sound-dai'\n"); + return num_codecs; + } + component = devm_kzalloc(dev, + sizeof *component * num_codecs, + GFP_KERNEL); + if (!component) + return -ENOMEM; + dai_link->codecs = component; + dai_link->num_codecs = num_codecs; + + /* Parse the list */ + for (index = 0, component = dai_link->codecs; + index < dai_link->num_codecs; + index++, component++) { + ret = of_parse_phandle_with_args(of_node, name, + "#sound-dai-cells", + index, &args); + if (ret) + goto err; + component->of_node = args.np; + ret = snd_soc_get_dai_name(&args, &component->dai_name); + if (ret < 0) + goto err; + } + return 0; +err: + for (index = 0, component = dai_link->codecs; + index < dai_link->num_codecs; + index++, component++) { + if (!component->of_node) + break; + of_node_put(component->of_node); + component->of_node = NULL; + } + dai_link->codecs = NULL; + dai_link->num_codecs = 0; + return ret; +} +EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs); + static int __init snd_soc_init(void) { #ifdef CONFIG_DEBUG_FS diff --combined tools/perf/util/probe-finder.c index b5247d777f0e,1cb1ef765ca0..d32cf93565da --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@@ -456,7 -456,7 +456,7 @@@ static int convert_variable_fields(Dwar return -EINVAL; } if (field->name[0] == '[') { - pr_err("Semantic error: %s is not a pointor" + pr_err("Semantic error: %s is not a pointer" " nor array.\n", varname); return -EINVAL; } @@@ -989,24 -989,8 +989,24 @@@ static int debuginfo__find_probes(struc int ret = 0; 
#if _ELFUTILS_PREREQ(0, 142) + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + /* Get the call frame information from this dwarf */ - pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); + elf = dwarf_getelf(dbg->dbg); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + return -EINVAL; + + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && + shdr.sh_type == SHT_PROGBITS) { + pf->cfi = dwarf_getcfi_elf(elf); + } else { + pf->cfi = dwarf_getcfi(dbg->dbg); + } #endif off = 0;
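For illustration of the snd_soc_dai_set_tdm_slot() semantics documented in the soc-core.c hunk above, here is a minimal machine-driver sketch. It is not part of the patch; the function name, the card wiring and the 4-slot/32-bit layout are assumptions made only for this example, and <sound/soc.h> is assumed to be included. The call itself follows the signature and return convention shown in the diff.

/*
 * Illustrative sketch only, not taken from this merge.  Configures the CODEC
 * DAI of a hypothetical machine driver for a 4-slot, 32-bit TDM bus,
 * transmitting on slot 0 and receiving on slot 1 (bit 0 = first slot,
 * bit 1 = second slot, as the new kernel-doc describes).
 */
static int example_card_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x1, 0x2, 4, 32);
	if (ret < 0)
		return ret;

	/* Passing 0 for slots would disable TDM mode again, per the kernel-doc. */
	return 0;
}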