4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 #define DEBUG_SUBSYSTEM S_LLITE
34 #include "../include/lprocfs_status.h"
35 #include <linux/seq_file.h>
36 #include "../include/obd_support.h"
38 #include "llite_internal.h"
39 #include "vvp_internal.h"
/*
 * NOTE(review): this listing is a partial extraction of lproc_llite.c —
 * the original file's line numbers are fused onto each line and many
 * lines are elided.  All code below is preserved byte-for-byte.
 */
41 /* debugfs llite mount point registration */
/*
 * Forward declarations; the definitions are generated later in this file
 * by the LPROC_SEQ_FOPS() macros (the offset_stats one lies past the end
 * of this extraction — confirm against the full source).
 */
42 static struct file_operations ll_rw_extents_stats_fops;
43 static struct file_operations ll_rw_extents_stats_pp_fops;
44 static struct file_operations ll_rw_offset_stats_fops;
/*
 * Read-only sysfs attributes backed by statfs data.  Each *_show() handler
 * resolves its ll_sb_info from the embedded kobject via container_of(),
 * calls ll_statfs_internal() with a cache window of OBD_STATFS_CACHE_SECONDS
 * (negative shift = accept results this recent), then formats one field of
 * struct obd_statfs into @buf with sprintf().
 * NOTE(review): rc declarations, error-return checks and closing braces are
 * elided in this extraction; kept byte-identical.
 */
/* Filesystem block size in bytes (osfs.os_bsize). */
46 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
49 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
51 struct obd_statfs osfs;
54 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
55 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
58 return sprintf(buf, "%u\n", osfs.os_bsize);
62 LUSTRE_RO_ATTR(blocksize);
/*
 * Total capacity in kilobytes: os_blocks scaled up by log2 of the block
 * size expressed in KiB.  The while-loop body is elided here — presumably
 * "result <<= 1;" — TODO confirm against the full source.
 */
64 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
67 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
69 struct obd_statfs osfs;
72 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
73 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
76 __u32 blk_size = osfs.os_bsize >> 10;
77 __u64 result = osfs.os_blocks;
79 while (blk_size >>= 1)
82 rc = sprintf(buf, "%llu\n", result);
87 LUSTRE_RO_ATTR(kbytestotal);
/* Free kilobytes: same scaling as kbytestotal, applied to os_bfree. */
89 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
92 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
94 struct obd_statfs osfs;
97 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
98 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
101 __u32 blk_size = osfs.os_bsize >> 10;
102 __u64 result = osfs.os_bfree;
104 while (blk_size >>= 1)
107 rc = sprintf(buf, "%llu\n", result);
112 LUSTRE_RO_ATTR(kbytesfree);
/* Kilobytes available to unprivileged users: scaled os_bavail. */
114 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
117 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
119 struct obd_statfs osfs;
122 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
123 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
126 __u32 blk_size = osfs.os_bsize >> 10;
127 __u64 result = osfs.os_bavail;
129 while (blk_size >>= 1)
132 rc = sprintf(buf, "%llu\n", result);
137 LUSTRE_RO_ATTR(kbytesavail);
/* Total inode count (osfs.os_files). */
139 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
142 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
144 struct obd_statfs osfs;
147 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
148 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
151 return sprintf(buf, "%llu\n", osfs.os_files);
155 LUSTRE_RO_ATTR(filestotal);
/* Free inode count (osfs.os_ffree). */
157 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
160 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
162 struct obd_statfs osfs;
165 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
166 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
169 return sprintf(buf, "%llu\n", osfs.os_ffree);
173 LUSTRE_RO_ATTR(filesfree);
/* Constant string: this code path is always a local (non-liblustre) client. */
175 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
178 return sprintf(buf, "local client\n");
180 LUSTRE_RO_ATTR(client_type);
/* Name of the registered filesystem type backing this superblock. */
182 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
185 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
188 return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
190 LUSTRE_RO_ATTR(fstype);
/* Client UUID string for this mount (sbi->ll_sb_uuid). */
192 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
195 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
198 return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
200 LUSTRE_RO_ATTR(uuid);
/*
 * debugfs "site" file: dump per-site cl_object cache counters by delegating
 * to cl_site_stats_print() on the superblock's lu_site.
 * (Lines elided in this extraction; kept byte-identical.)
 */
202 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
204 struct super_block *sb = m->private;
207 * See description of statistical counters in struct cl_site, and
210 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
213 LPROC_SEQ_FOPS_RO(ll_site_stats);
/*
 * Read-ahead tunables, exposed in megabytes but stored internally in pages.
 * Conversion factor is mult = 1 << (20 - PAGE_SHIFT) pages per MB; values
 * are read/written under sbi->ll_lock.  Store paths parse with kstrtoul()
 * and range-check before committing (error-return lines elided here).
 */
/* max_read_ahead_mb: global cap, ll_ra_info.ra_max_pages, as fractional MB. */
215 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
216 struct attribute *attr, char *buf)
218 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
223 spin_lock(&sbi->ll_lock);
224 pages_number = sbi->ll_ra_info.ra_max_pages;
225 spin_unlock(&sbi->ll_lock);
227 mult = 1 << (20 - PAGE_SHIFT);
228 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
/* Rejects settings above half of total RAM (checked in pages). */
231 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
232 struct attribute *attr,
236 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
239 unsigned long pages_number;
241 rc = kstrtoul(buffer, 10, &pages_number);
245 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
247 if (pages_number > totalram_pages / 2) {
248 CERROR("can't set file readahead more than %lu MB\n",
249 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
253 spin_lock(&sbi->ll_lock);
254 sbi->ll_ra_info.ra_max_pages = pages_number;
255 spin_unlock(&sbi->ll_lock);
259 LUSTRE_RW_ATTR(max_read_ahead_mb);
/* max_read_ahead_per_file_mb: per-file cap, must not exceed the global cap. */
261 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
262 struct attribute *attr,
265 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
270 spin_lock(&sbi->ll_lock);
271 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
272 spin_unlock(&sbi->ll_lock);
274 mult = 1 << (20 - PAGE_SHIFT);
275 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
/*
 * NOTE(review): unlike max_read_ahead_mb_store, no visible "MB -> pages"
 * multiply before the comparison — it may sit on an elided line; confirm
 * against the full source.
 */
278 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
279 struct attribute *attr,
283 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
286 unsigned long pages_number;
288 rc = kstrtoul(buffer, 10, &pages_number);
292 if (pages_number > sbi->ll_ra_info.ra_max_pages) {
293 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
294 sbi->ll_ra_info.ra_max_pages);
298 spin_lock(&sbi->ll_lock);
299 sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
300 spin_unlock(&sbi->ll_lock);
304 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
/* max_read_ahead_whole_mb: threshold below which whole files are read ahead. */
306 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
307 struct attribute *attr,
310 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
315 spin_lock(&sbi->ll_lock);
316 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
317 spin_unlock(&sbi->ll_lock);
319 mult = 1 << (20 - PAGE_SHIFT);
320 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
323 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
324 struct attribute *attr,
328 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
331 unsigned long pages_number;
333 rc = kstrtoul(buffer, 10, &pages_number);
337 /* Cap this at the current max readahead window size, the readahead
338 * algorithm does this anyway so it's pointless to set it larger.
340 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
341 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
342 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
346 spin_lock(&sbi->ll_lock);
347 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
348 spin_unlock(&sbi->ll_lock);
352 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
/*
 * debugfs "max_cached_mb": size of the client-side page cache LRU budget,
 * shared through sbi->ll_cache (struct cl_client_cache).
 * NOTE(review): this function pair is heavily elided in this extraction —
 * local declarations, branch conditions, error paths and several statements
 * are missing.  Comments below describe only what the visible lines show;
 * kept byte-identical.
 */
354 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
356 struct super_block *sb = m->private;
357 struct ll_sb_info *sbi = ll_s2sbi(sb);
358 struct cl_client_cache *cache = sbi->ll_cache;
/* shift converts pages -> MB for display. */
359 int shift = 20 - PAGE_SHIFT;
363 max_cached_mb = cache->ccc_lru_max >> shift;
364 unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
367 "max_cached_mb: %ld\n"
370 "reclaim_count: %u\n"
371 atomic_read(&cache->ccc_users),
/* "used" is derived as max minus unused. */
373 max_cached_mb - unused_mb,
375 cache->ccc_lru_shrinkers);
/*
 * Write handler: parses the value following the "max_cached_mb:" token
 * (so the show output can be written back verbatim), validates it against
 * totalram_pages, then grows the budget by crediting ccc_lru_left or
 * shrinks it — first by debiting free slots with a cmpxchg retry loop,
 * then (once mounted, ll_dt_exp set) by asking the OSCs to drop LRU slots
 * via obd_set_info_async(KEY_CACHE_LRU_SHRINK).
 */
379 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
380 const char __user *buffer,
381 size_t count, loff_t *off)
383 struct super_block *sb = ((struct seq_file *)file->private_data)->private;
384 struct ll_sb_info *sbi = ll_s2sbi(sb);
385 struct cl_client_cache *cache = sbi->ll_cache;
/* Input is staged through a bounded kernel buffer. */
396 if (count >= sizeof(kernbuf))
399 if (copy_from_user(kernbuf, buffer, count))
403 mult = 1 << (20 - PAGE_SHIFT);
404 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
406 rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
412 pages_number = (long)val;
414 if (pages_number < 0 || pages_number > totalram_pages) {
415 CERROR("%s: can't set max cache more than %lu MB\n",
416 ll_get_fsname(sb, NULL, 0),
417 totalram_pages >> (20 - PAGE_SHIFT));
/* diff computed under ll_lock; its sign selects grow vs. shrink below. */
421 spin_lock(&sbi->ll_lock);
422 diff = pages_number - cache->ccc_lru_max;
423 spin_unlock(&sbi->ll_lock);
425 /* easy - add more LRU slots. */
427 atomic_long_add(diff, &cache->ccc_lru_left);
432 env = cl_env_get(&refcheck);
440 /* reduce LRU budget from free slots. */
444 ov = atomic_long_read(&cache->ccc_lru_left);
/* Never drive the free-slot count below zero. */
448 nv = ov > diff ? ov - diff : 0;
449 rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
450 if (likely(ov == rc)) {
460 if (!sbi->ll_dt_exp) { /* being initialized */
465 /* difficult - have to ask OSCs to drop LRU slots. */
467 rc = obd_set_info_async(env, sbi->ll_dt_exp,
468 sizeof(KEY_CACHE_LRU_SHRINK),
469 KEY_CACHE_LRU_SHRINK,
470 sizeof(tmp), &tmp, NULL);
474 cl_env_put(env, &refcheck);
/* Commit the new maximum under ll_lock. */
478 spin_lock(&sbi->ll_lock);
479 cache->ccc_lru_max = pages_number;
480 spin_unlock(&sbi->ll_lock);
/* NOTE(review): looks like an undo path returning borrowed slots — confirm. */
483 atomic_long_add(nrpages, &cache->ccc_lru_left);
488 LPROC_SEQ_FOPS(ll_max_cached_mb);
/*
 * checksum_pages: boolean toggle for the LL_SBI_CHECKSUM flag.  The store
 * path also pushes the new setting to the OSC layer via KEY_CHECKSUM and
 * only warns (does not fail) if that propagation fails.
 * (Lines elided in this extraction; kept byte-identical.)
 */
490 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
493 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
496 return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
499 static ssize_t checksum_pages_store(struct kobject *kobj,
500 struct attribute *attr,
504 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
513 rc = kstrtoul(buffer, 10, &val);
/* The branch condition selecting set vs. clear is elided here. */
517 sbi->ll_flags |= LL_SBI_CHECKSUM;
519 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
521 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
522 KEY_CHECKSUM, sizeof(val), &val, NULL);
524 CWARN("Failed to set OSC checksum flags: %d\n", rc);
528 LUSTRE_RW_ATTR(checksum_pages);
/*
 * Shared helpers for the stats_track_{pid,ppid,gid} attributes: statistics
 * in sbi->ll_stats can be restricted to one pid/ppid/gid.
 */
/*
 * Read side: print the tracked id when the current track type matches the
 * attribute's type, "0 (all)" when everything is tracked, else "untracked".
 */
530 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
531 enum stats_track_type type)
533 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
536 if (sbi->ll_stats_track_type == type)
537 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
538 else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
539 return sprintf(buf, "0 (all)\n");
541 return sprintf(buf, "untracked\n");
/*
 * Write side: parse the id, record it, select STATS_TRACK_ALL or the
 * attribute's type (the selecting condition — presumably pid == 0 — sits
 * on an elided line; confirm), then reset the accumulated stats.
 */
544 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
546 enum stats_track_type type)
548 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
553 rc = kstrtoul(buffer, 10, &pid);
556 sbi->ll_stats_track_id = pid;
558 sbi->ll_stats_track_type = STATS_TRACK_ALL;
560 sbi->ll_stats_track_type = type;
/* Changing the filter invalidates previously gathered counters. */
561 lprocfs_clear_stats(sbi->ll_stats);
/* Thin sysfs wrappers delegating to the helpers above. */
565 static ssize_t stats_track_pid_show(struct kobject *kobj,
566 struct attribute *attr,
569 return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
572 static ssize_t stats_track_pid_store(struct kobject *kobj,
573 struct attribute *attr,
577 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
579 LUSTRE_RW_ATTR(stats_track_pid);
581 static ssize_t stats_track_ppid_show(struct kobject *kobj,
582 struct attribute *attr,
585 return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
588 static ssize_t stats_track_ppid_store(struct kobject *kobj,
589 struct attribute *attr,
593 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
595 LUSTRE_RW_ATTR(stats_track_ppid);
597 static ssize_t stats_track_gid_show(struct kobject *kobj,
598 struct attribute *attr,
601 return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
604 static ssize_t stats_track_gid_store(struct kobject *kobj,
605 struct attribute *attr,
609 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
611 LUSTRE_RW_ATTR(stats_track_gid);
/*
 * statahead_max: maximum number of in-flight statahead RPCs, bounded by
 * LL_SA_RPC_MAX; out-of-range writes are rejected with a CERROR.
 */
613 static ssize_t statahead_max_show(struct kobject *kobj,
614 struct attribute *attr,
617 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
620 return sprintf(buf, "%u\n", sbi->ll_sa_max);
623 static ssize_t statahead_max_store(struct kobject *kobj,
624 struct attribute *attr,
628 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
633 rc = kstrtoul(buffer, 10, &val);
637 if (val <= LL_SA_RPC_MAX)
638 sbi->ll_sa_max = val;
640 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
645 LUSTRE_RW_ATTR(statahead_max);
/* statahead_agl: boolean toggle for the LL_SBI_AGL_ENABLED flag. */
647 static ssize_t statahead_agl_show(struct kobject *kobj,
648 struct attribute *attr,
651 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
654 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
657 static ssize_t statahead_agl_store(struct kobject *kobj,
658 struct attribute *attr,
662 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
667 rc = kstrtoul(buffer, 10, &val);
/* set-vs-clear condition elided in this extraction. */
672 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
674 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
678 LUSTRE_RW_ATTR(statahead_agl);
/* debugfs "statahead_stats": dump the three statahead/AGL counters. */
680 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
682 struct super_block *sb = m->private;
683 struct ll_sb_info *sbi = ll_s2sbi(sb);
686 "statahead total: %u\n"
687 "statahead wrong: %u\n"
689 atomic_read(&sbi->ll_sa_total),
690 atomic_read(&sbi->ll_sa_wrong),
691 atomic_read(&sbi->ll_agl_total));
695 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
/*
 * lazystatfs: boolean toggle for LL_SBI_LAZYSTATFS (statfs may skip
 * unreachable OSTs).  Same parse-then-flag pattern as the toggles above;
 * branch condition and returns elided in this extraction.
 */
697 static ssize_t lazystatfs_show(struct kobject *kobj,
698 struct attribute *attr,
701 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
704 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
707 static ssize_t lazystatfs_store(struct kobject *kobj,
708 struct attribute *attr,
712 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
717 rc = kstrtoul(buffer, 10, &val);
722 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
724 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
728 LUSTRE_RW_ATTR(lazystatfs);
/*
 * max_easize (RO): maximum MDS extended-attribute size, queried through
 * ll_get_max_mdsize().
 */
730 static ssize_t max_easize_show(struct kobject *kobj,
731 struct attribute *attr,
734 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
739 rc = ll_get_max_mdsize(sbi, &ealen);
743 return sprintf(buf, "%u\n", ealen);
745 LUSTRE_RO_ATTR(max_easize);
748 * Get default_easize.
750 * \see client_obd::cl_default_mds_easize
752 * \param[in] kobj kernel object for sysfs tree
753 * \param[in] attr attribute of this kernel object
754 * \param[in] buf buffer to write data into
756 * \retval positive \a count on success
757 * \retval negative negated errno on failure
759 static ssize_t default_easize_show(struct kobject *kobj,
760 struct attribute *attr,
763 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
768 rc = ll_get_default_mdsize(sbi, &ealen);
772 return sprintf(buf, "%u\n", ealen);
776 * Set default_easize.
778 * Range checking on the passed value is handled by
779 * ll_set_default_mdsize().
781 * \see client_obd::cl_default_mds_easize
783 * \param[in] kobj kernel object for sysfs tree
784 * \param[in] attr attribute of this kernel object
785 * \param[in] buffer string passed from user space
786 * \param[in] count \a buffer length
788 * \retval positive \a count on success
789 * \retval negative negated errno on failure
791 static ssize_t default_easize_store(struct kobject *kobj,
792 struct attribute *attr,
796 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801 rc = kstrtoul(buffer, 10, &val);
805 rc = ll_set_default_mdsize(sbi, val);
811 LUSTRE_RW_ATTR(default_easize);
/*
 * debugfs "sbi_flags": print the name of every set LL_SBI_* flag, using
 * the LL_SBI_FLAGS name table.  The loop scaffolding (bit iteration, the
 * set-bit test) is elided in this extraction; visible is the defensive
 * check that the name table covers every flag bit.
 */
813 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
815 const char *str[] = LL_SBI_FLAGS;
816 struct super_block *sb = m->private;
817 int flags = ll_s2sbi(sb)->ll_flags;
821 if (ARRAY_SIZE(str) <= i) {
822 CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
823 ll_get_fsname(sb, NULL, 0));
828 seq_printf(m, "%s ", str[i]);
836 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
/*
 * xattr_cache: per-mount enable flag for the client xattr cache.  Writes
 * accept only 0/1, and enabling requires the LL_SBI_XATTR_CACHE feature
 * flag to be set (the rejected-path return values are elided here).
 */
838 static ssize_t xattr_cache_show(struct kobject *kobj,
839 struct attribute *attr,
842 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
845 return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
848 static ssize_t xattr_cache_store(struct kobject *kobj,
849 struct attribute *attr,
853 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
858 rc = kstrtoul(buffer, 10, &val);
862 if (val != 0 && val != 1)
865 if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
868 sbi->ll_xattr_cache_enabled = val;
872 LUSTRE_RW_ATTR(xattr_cache);
/*
 * unstable_stats: report pages written but not yet committed on the server
 * (ccc_unstable_nr), both as a page count and in MB; the store path toggles
 * the ccc_unstable_check flag.
 */
874 static ssize_t unstable_stats_show(struct kobject *kobj,
875 struct attribute *attr,
878 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
880 struct cl_client_cache *cache = sbi->ll_cache;
884 pages = atomic_long_read(&cache->ccc_unstable_nr);
885 mb = (pages * PAGE_SIZE) >> 20;
887 return sprintf(buf, "unstable_check: %8d\n"
888 "unstable_pages: %12ld\n"
889 "unstable_mb: %8d\n",
890 cache->ccc_unstable_check, pages, mb);
/*
 * Accepts the show output written back verbatim: the value is parsed after
 * the "unstable_check:" token, staged via a bounded kernel buffer, and
 * normalized to 0/1 with !!.
 */
893 static ssize_t unstable_stats_store(struct kobject *kobj,
894 struct attribute *attr,
898 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
905 if (count >= sizeof(kernbuf))
908 if (copy_from_user(kernbuf, buffer, count))
912 buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
914 rc = lprocfs_write_helper(buffer, count, &val);
918 /* borrow lru lock to set the value */
919 spin_lock(&sbi->ll_cache->ccc_lru_lock);
920 sbi->ll_cache->ccc_unstable_check = !!val;
921 spin_unlock(&sbi->ll_cache->ccc_lru_lock);
925 LUSTRE_RW_ATTR(unstable_stats);
/*
 * root_squash: show/set the "uid:gid" pair that root is squashed to;
 * parsing and validation are delegated to lprocfs_wr_root_squash().
 */
927 static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
930 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
932 struct root_squash_info *squash = &sbi->ll_squash;
934 return sprintf(buf, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
937 static ssize_t root_squash_store(struct kobject *kobj, struct attribute *attr,
938 const char *buffer, size_t count)
940 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
942 struct root_squash_info *squash = &sbi->ll_squash;
944 return lprocfs_wr_root_squash(buffer, count, squash,
945 ll_get_fsname(sbi->ll_sb, NULL, 0));
947 LUSTRE_RW_ATTR(root_squash);
/*
 * debugfs "nosquash_nids": NID list exempt from root squashing.  Reads
 * print the list (or "NONE") under the rsi_sem read lock, writing directly
 * into the seq_file buffer via cfs_print_nidlist().
 */
949 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
951 struct super_block *sb = m->private;
952 struct ll_sb_info *sbi = ll_s2sbi(sb);
953 struct root_squash_info *squash = &sbi->ll_squash;
956 down_read(&squash->rsi_sem);
957 if (!list_empty(&squash->rsi_nosquash_nids)) {
958 len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
959 &squash->rsi_nosquash_nids);
963 seq_puts(m, "NONE\n");
965 up_read(&squash->rsi_sem);
/*
 * Writes delegate parsing to lprocfs_wr_nosquash_nids() and then refresh
 * the cached squash state with ll_compute_rootsquash_state().
 */
970 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
971 const char __user *buffer,
972 size_t count, loff_t *off)
974 struct seq_file *m = file->private_data;
975 struct super_block *sb = m->private;
976 struct ll_sb_info *sbi = ll_s2sbi(sb);
977 struct root_squash_info *squash = &sbi->ll_squash;
980 rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
981 ll_get_fsname(sb, NULL, 0));
985 ll_compute_rootsquash_state(sbi);
990 LPROC_SEQ_FOPS(ll_nosquash_nids);
/*
 * Table of debugfs files registered per mount by ldebugfs_add_vars();
 * each entry pairs a file name with its file_operations.  The NULL
 * terminator entry is elided in this extraction.
 */
992 static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
993 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
994 { "site", &ll_site_stats_fops, NULL, 0 },
995 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
996 { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
997 { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
998 { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
999 { .name = "nosquash_nids",
1000 .fops = &ll_nosquash_nids_fops },
/* Upper bound for the debugfs directory name built in
 * ldebugfs_register_mountpoint(); #undef'd after use. */
1004 #define MAX_STRING_SIZE 128
/*
 * Default sysfs attributes attached to each mount's kobject via
 * llite_ktype (one entry per LUSTRE_RO_ATTR/LUSTRE_RW_ATTR above).
 * The NULL terminator is elided in this extraction.
 */
1006 static struct attribute *llite_attrs[] = {
1007 &lustre_attr_blocksize.attr,
1008 &lustre_attr_kbytestotal.attr,
1009 &lustre_attr_kbytesfree.attr,
1010 &lustre_attr_kbytesavail.attr,
1011 &lustre_attr_filestotal.attr,
1012 &lustre_attr_filesfree.attr,
1013 &lustre_attr_client_type.attr,
1014 &lustre_attr_fstype.attr,
1015 &lustre_attr_uuid.attr,
1016 &lustre_attr_max_read_ahead_mb.attr,
1017 &lustre_attr_max_read_ahead_per_file_mb.attr,
1018 &lustre_attr_max_read_ahead_whole_mb.attr,
1019 &lustre_attr_checksum_pages.attr,
1020 &lustre_attr_stats_track_pid.attr,
1021 &lustre_attr_stats_track_ppid.attr,
1022 &lustre_attr_stats_track_gid.attr,
1023 &lustre_attr_statahead_max.attr,
1024 &lustre_attr_statahead_agl.attr,
1025 &lustre_attr_lazystatfs.attr,
1026 &lustre_attr_max_easize.attr,
1027 &lustre_attr_default_easize.attr,
1028 &lustre_attr_xattr_cache.attr,
1029 &lustre_attr_unstable_stats.attr,
1030 &lustre_attr_root_squash.attr,
/*
 * kobject release callback: signals ll_kobj_unregister so that
 * ldebugfs_unregister_mountpoint() can wait for the last reference
 * to drop before freeing sbi resources.
 */
1034 static void llite_sb_release(struct kobject *kobj)
1036 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1038 complete(&sbi->ll_kobj_unregister);
/* kobj_type tying the attribute list, sysfs ops and release together. */
1041 static struct kobj_type llite_ktype = {
1042 .default_attrs = llite_attrs,
1043 .sysfs_ops = &lustre_sysfs_ops,
1044 .release = llite_sb_release,
/*
 * Per-operation counter descriptors for the "stats" file: opcode, counter
 * configuration flags (LPROCFS_TYPE_* selects the unit; AVGMINMAX adds
 * min/max/avg tracking) and display name.  Indexed 0..LPROC_LL_FILE_OPCODES-1
 * and consumed by the counter-init loop in ldebugfs_register_mountpoint().
 */
1047 static const struct llite_file_opcode {
1051 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1052 /* file operation */
1053 { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1054 { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1055 { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1057 { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1059 { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
1061 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
1063 { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1065 { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1067 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
1068 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
1069 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
1070 { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
1071 { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
1072 { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
1073 { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
1074 /* inode operation */
1075 { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
1076 { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
1077 { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
1078 { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
1079 /* dir inode operation */
1080 { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
1081 { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
1082 { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
1083 { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
1084 { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
1085 { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
1086 { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
1087 { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
1088 /* special inode operation */
1089 { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
1090 { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
1091 { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
1092 { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
1093 { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
1094 { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
1095 { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
1096 { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
/*
 * Record @count occurrences of operation @op in sbi->ll_stats, but only
 * when the current task matches the configured tracking filter: everything
 * (STATS_TRACK_ALL), a specific pid, the parent pid, or a gid.
 * NOTE(review): the direct current->real_parent->pid access appears here
 * without a visible rcu_read_lock/guard — it may sit on an elided line;
 * confirm against the full source.
 */
1099 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1103 if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1104 lprocfs_counter_add(sbi->ll_stats, op, count);
1105 else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1106 sbi->ll_stats_track_id == current->pid)
1107 lprocfs_counter_add(sbi->ll_stats, op, count);
1108 else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1109 sbi->ll_stats_track_id == current->real_parent->pid)
1110 lprocfs_counter_add(sbi->ll_stats, op, count);
1111 else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1112 sbi->ll_stats_track_id ==
1113 from_kgid(&init_user_ns, current_gid()))
1114 lprocfs_counter_add(sbi->ll_stats, op, count);
1116 EXPORT_SYMBOL(ll_stats_ops_tally);
/*
 * Human-readable names for the RA_STAT_* read-ahead counters, indexed by
 * enum value; used to label entries in the "read_ahead_stats" file.
 */
1118 static const char *ra_stat_string[] = {
1119 [RA_STAT_HIT] = "hits",
1120 [RA_STAT_MISS] = "misses",
1121 [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1122 [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1123 [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1124 [RA_STAT_FAILED_MATCH] = "failed lock match",
1125 [RA_STAT_DISCARDED] = "read but discarded",
1126 [RA_STAT_ZERO_LEN] = "zero length file",
1127 [RA_STAT_ZERO_WINDOW] = "zero size window",
1128 [RA_STAT_EOF] = "read-ahead to EOF",
1129 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1130 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1131 [RA_STAT_FAILED_REACH_END] = "failed to reach end"
/*
 * Create all per-mount debugfs and sysfs state for superblock @sb:
 * a debugfs directory named "<profile>-<sb ptr>" (the "-client" suffix is
 * stripped from the profile), the rw-stats seq files, the "stats" and
 * "read_ahead_stats" counter files, the lprocfs_llite_obd_vars entries,
 * the mount kobject, and symlinks to the named @mdc and @osc obd devices.
 * Returns 0 on success or a negative errno; on failure the visible error
 * path removes the debugfs directory and frees both stats sets.
 * NOTE(review): many error-branch and label lines are elided in this
 * extraction — the goto structure cannot be fully seen; kept byte-identical.
 */
1134 int ldebugfs_register_mountpoint(struct dentry *parent,
1135 struct super_block *sb, char *osc, char *mdc)
1137 struct lustre_sb_info *lsi = s2lsi(sb);
1138 struct ll_sb_info *sbi = ll_s2sbi(sb);
1139 struct obd_device *obd;
1141 char name[MAX_STRING_SIZE + 1], *ptr;
1142 int err, id, len, rc;
/* Guarantee NUL termination regardless of snprintf truncation. */
1144 name[MAX_STRING_SIZE] = '\0';
1151 len = strlen(lsi->lsi_lmd->lmd_profile);
/* Drop a trailing "-client" from the profile name (len adjustment elided). */
1152 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
1153 if (ptr && (strcmp(ptr, "-client") == 0))
1157 snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
1158 lsi->lsi_lmd->lmd_profile, sb);
1160 dir = ldebugfs_register(name, parent, NULL, NULL);
1161 if (IS_ERR_OR_NULL(dir)) {
1162 err = dir ? PTR_ERR(dir) : -ENOMEM;
1163 sbi->ll_debugfs_entry = NULL;
1166 sbi->ll_debugfs_entry = dir;
/* Per-mount seq files; failures only warn, registration continues. */
1168 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
1169 &vvp_dump_pgcache_file_ops, sbi);
1171 CWARN("Error adding the dump_page_cache file\n");
1173 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1174 &ll_rw_extents_stats_fops, sbi);
1176 CWARN("Error adding the extent_stats file\n");
1178 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1179 "extents_stats_per_process",
1180 0644, &ll_rw_extents_stats_pp_fops, sbi);
1182 CWARN("Error adding the extents_stats_per_process file\n");
1184 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1185 &ll_rw_offset_stats_fops, sbi);
1187 CWARN("Error adding the offset_stats file\n");
1189 /* File operations stats */
1190 sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1191 LPROCFS_STATS_FLAG_NONE);
1192 if (!sbi->ll_stats) {
1196 /* do counter init */
/* Unit string (ptr) selection per counter type is partly elided here. */
1197 for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1198 __u32 type = llite_opcode_table[id].type;
1201 if (type & LPROCFS_TYPE_REGS)
1203 else if (type & LPROCFS_TYPE_BYTES)
1205 else if (type & LPROCFS_TYPE_PAGES)
1207 lprocfs_counter_init(sbi->ll_stats,
1208 llite_opcode_table[id].opcode,
1209 (type & LPROCFS_CNTR_AVGMINMAX),
1210 llite_opcode_table[id].opname, ptr);
1212 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
/* Read-ahead stats: one counter per ra_stat_string entry, unit "pages". */
1217 sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1218 LPROCFS_STATS_FLAG_NONE);
1219 if (!sbi->ll_ra_stats) {
1224 for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1225 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1226 ra_stat_string[id], "pages");
1228 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1233 err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
1234 lprocfs_llite_obd_vars, sb);
/* sysfs kobject for the LUSTRE_*_ATTR attributes defined above. */
1238 sbi->ll_kobj.kset = llite_kset;
1239 init_completion(&sbi->ll_kobj_unregister);
1240 err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
/* Symlinks from the mount kobject to its MDC and OSC obd devices. */
1246 obd = class_name2obd(mdc);
1248 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1249 obd->obd_type->typ_name);
1254 obd = class_name2obd(osc);
1256 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1257 obd->obd_type->typ_name);
/* Error path (label elided): tear down everything created above. */
1260 ldebugfs_remove(&sbi->ll_debugfs_entry);
1261 lprocfs_free_stats(&sbi->ll_ra_stats);
1262 lprocfs_free_stats(&sbi->ll_stats);
/*
 * Teardown: remove the debugfs tree, drop the kobject reference and wait
 * for llite_sb_release() to signal completion before freeing the stats.
 */
1267 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
1269 if (sbi->ll_debugfs_entry) {
1270 ldebugfs_remove(&sbi->ll_debugfs_entry);
1271 kobject_put(&sbi->ll_kobj);
1272 wait_for_completion(&sbi->ll_kobj_unregister);
1273 lprocfs_free_stats(&sbi->ll_ra_stats);
1274 lprocfs_free_stats(&sbi->ll_stats);
/* MAX_STRING_SIZE was only needed for the name buffer above. */
1278 #undef MAX_STRING_SIZE
/*
 * Integer percentage of a over b, 0 when b is 0.
 * NOTE(review): macro arguments and expansion are not fully parenthesized
 * ("b ? a * 100 / b : 0") — safe for the simple variable arguments used
 * below, but fragile if ever called with expressions.
 */
1280 #define pct(a, b) (b ? a * 100 / b : 0)
/*
 * Print one histogram table (slot @which of pp_extents) to @seq: per-bucket
 * read/write call counts with per-bucket and cumulative percentages.  Bucket
 * boundaries are powers of two starting at LL_HIST_START; the unit letter
 * advances through "KMGTPEZY" (the advance/units-increment lines are elided
 * here).  First pass computes the totals, second pass prints until both
 * cumulative counts reach their totals.
 */
1282 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1283 struct seq_file *seq, int which)
1285 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1286 unsigned long start, end, r, w;
1287 char *unitp = "KMGTPEZY";
1289 struct per_process_info *pp_info = &io_extents->pp_extents[which];
1295 for (i = 0; i < LL_HIST_MAX; i++) {
1296 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1297 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1300 for (i = 0; i < LL_HIST_MAX; i++) {
1301 r = pp_info->pp_r_hist.oh_buckets[i];
1302 w = pp_info->pp_w_hist.oh_buckets[i];
1305 end = 1 << (i + LL_HIST_START - units);
1306 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
1307 start, *unitp, end, *unitp,
/* Last bucket is open-ended, marked with '+'. */
1308 (i == LL_HIST_MAX - 1) ? '+' : ' ',
1309 r, pct(r, read_tot), pct(read_cum, read_tot),
1310 w, pct(w, write_tot), pct(write_cum, write_tot));
/* Roll over to the next unit letter once the range reaches 1024 units. */
1312 if (start == 1 << 10) {
/* Early exit once both cumulative counts have covered everything. */
1317 if (read_cum == read_tot && write_cum == write_tot)
/*
 * debugfs "extents_stats_per_process" reader: per-PID I/O size histograms.
 * Prints "disabled" plus activation instructions when stats are off,
 * otherwise a snapshot timestamp, column headers, and one histogram per
 * occupied pp_extents slot, all under ll_pp_extent_lock.
 */
1322 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1324 struct timespec64 now;
1325 struct ll_sb_info *sbi = seq->private;
1326 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1329 ktime_get_real_ts64(&now);
1331 if (!sbi->ll_rw_stats_on) {
1332 seq_printf(seq, "disabled\n"
1333 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
/* NOTE(review): format says "(secs.usecs)" but prints nanoseconds (%09lu). */
1336 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1337 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1338 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1339 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1340 "extents", "calls", "%", "cum%",
1341 "calls", "%", "cum%");
1342 spin_lock(&sbi->ll_pp_extent_lock);
1343 for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1344 if (io_extents->pp_extents[k].pid != 0) {
1345 seq_printf(seq, "\nPID: %d\n",
1346 io_extents->pp_extents[k].pid);
1347 ll_display_extents_info(io_extents, seq, k);
1350 spin_unlock(&sbi->ll_pp_extent_lock);
/*
 * Writer: any integer enables/disables collection; the strings "disabled"/
 * "Disabled" (short writes that fail integer parsing) also disable it.
 * Enabling resets every per-process slot.  The branch selecting rw_stats_on
 * = 0 vs 1 is partly elided here.
 */
1354 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1355 const char __user *buf,
1359 struct seq_file *seq = file->private_data;
1360 struct ll_sb_info *sbi = seq->private;
1361 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1363 int value = 1, rc = 0;
1368 rc = lprocfs_write_helper(buf, len, &value);
1369 if (rc < 0 && len < 16) {
1372 if (copy_from_user(kernbuf, buf, len))
1376 if (kernbuf[len - 1] == '\n')
1377 kernbuf[len - 1] = 0;
1379 if (strcmp(kernbuf, "disabled") == 0 ||
1380 strcmp(kernbuf, "Disabled") == 0)
1385 sbi->ll_rw_stats_on = 0;
1387 sbi->ll_rw_stats_on = 1;
1389 spin_lock(&sbi->ll_pp_extent_lock);
1390 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1391 io_extents->pp_extents[i].pid = 0;
1392 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1393 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1395 spin_unlock(&sbi->ll_pp_extent_lock);
1399 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
/*
 * debugfs "extents_stats" reader: the aggregate I/O size histogram, stored
 * in the extra pp_extents slot at index LL_PROCESS_HIST_MAX and printed
 * under ll_lock.  Same disabled-message behaviour as the per-process file.
 */
1401 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1403 struct timespec64 now;
1404 struct ll_sb_info *sbi = seq->private;
1405 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1407 ktime_get_real_ts64(&now);
1409 if (!sbi->ll_rw_stats_on) {
1410 seq_printf(seq, "disabled\n"
1411 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
/* NOTE(review): format says "(secs.usecs)" but prints nanoseconds (%09lu). */
1414 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1415 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1417 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1418 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1419 "extents", "calls", "%", "cum%",
1420 "calls", "%", "cum%");
1421 spin_lock(&sbi->ll_lock);
1422 ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1423 spin_unlock(&sbi->ll_lock);
/*
 * Writer: same enable/disable protocol as the per-process file.  Note the
 * reset loop here uses "<=" so it also clears the aggregate slot at index
 * LL_PROCESS_HIST_MAX (the per-process writer clears only "<").
 */
1428 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1429 const char __user *buf,
1430 size_t len, loff_t *off)
1432 struct seq_file *seq = file->private_data;
1433 struct ll_sb_info *sbi = seq->private;
1434 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1436 int value = 1, rc = 0;
1441 rc = lprocfs_write_helper(buf, len, &value);
1442 if (rc < 0 && len < 16) {
1445 if (copy_from_user(kernbuf, buf, len))
1449 if (kernbuf[len - 1] == '\n')
1450 kernbuf[len - 1] = 0;
1452 if (strcmp(kernbuf, "disabled") == 0 ||
1453 strcmp(kernbuf, "Disabled") == 0)
1458 sbi->ll_rw_stats_on = 0;
1460 sbi->ll_rw_stats_on = 1;
1462 spin_lock(&sbi->ll_pp_extent_lock);
1463 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1464 io_extents->pp_extents[i].pid = 0;
1465 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1466 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1468 spin_unlock(&sbi->ll_pp_extent_lock);
1473 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1475 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1476 struct ll_file_data *file, loff_t pos,
1477 size_t count, int rw)
1480 struct ll_rw_process_info *process;
1481 struct ll_rw_process_info *offset;
1482 int *off_count = &sbi->ll_rw_offset_entry_count;
1483 int *process_count = &sbi->ll_offset_process_count;
1484 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1486 if (!sbi->ll_rw_stats_on)
1488 process = sbi->ll_rw_process_info;
1489 offset = sbi->ll_rw_offset_info;
1491 spin_lock(&sbi->ll_pp_extent_lock);
1492 /* Extent statistics */
1493 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1494 if (io_extents->pp_extents[i].pid == pid) {
1502 sbi->ll_extent_process_count =
1503 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1504 cur = sbi->ll_extent_process_count;
1505 io_extents->pp_extents[cur].pid = pid;
1506 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1507 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1510 for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1511 (i < (LL_HIST_MAX - 1)); i++)
1514 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1515 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1517 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1518 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1520 spin_unlock(&sbi->ll_pp_extent_lock);
1522 spin_lock(&sbi->ll_process_lock);
1523 /* Offset statistics */
1524 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1525 if (process[i].rw_pid == pid) {
1526 if (process[i].rw_last_file != file) {
1527 process[i].rw_range_start = pos;
1528 process[i].rw_last_file_pos = pos + count;
1529 process[i].rw_smallest_extent = count;
1530 process[i].rw_largest_extent = count;
1531 process[i].rw_offset = 0;
1532 process[i].rw_last_file = file;
1533 spin_unlock(&sbi->ll_process_lock);
1536 if (process[i].rw_last_file_pos != pos) {
1538 (*off_count + 1) % LL_OFFSET_HIST_MAX;
1539 offset[*off_count].rw_op = process[i].rw_op;
1540 offset[*off_count].rw_pid = pid;
1541 offset[*off_count].rw_range_start =
1542 process[i].rw_range_start;
1543 offset[*off_count].rw_range_end =
1544 process[i].rw_last_file_pos;
1545 offset[*off_count].rw_smallest_extent =
1546 process[i].rw_smallest_extent;
1547 offset[*off_count].rw_largest_extent =
1548 process[i].rw_largest_extent;
1549 offset[*off_count].rw_offset =
1550 process[i].rw_offset;
1551 process[i].rw_op = rw;
1552 process[i].rw_range_start = pos;
1553 process[i].rw_smallest_extent = count;
1554 process[i].rw_largest_extent = count;
1555 process[i].rw_offset = pos -
1556 process[i].rw_last_file_pos;
1558 if (process[i].rw_smallest_extent > count)
1559 process[i].rw_smallest_extent = count;
1560 if (process[i].rw_largest_extent < count)
1561 process[i].rw_largest_extent = count;
1562 process[i].rw_last_file_pos = pos + count;
1563 spin_unlock(&sbi->ll_process_lock);
1567 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1568 process[*process_count].rw_pid = pid;
1569 process[*process_count].rw_op = rw;
1570 process[*process_count].rw_range_start = pos;
1571 process[*process_count].rw_last_file_pos = pos + count;
1572 process[*process_count].rw_smallest_extent = count;
1573 process[*process_count].rw_largest_extent = count;
1574 process[*process_count].rw_offset = 0;
1575 process[*process_count].rw_last_file = file;
1576 spin_unlock(&sbi->ll_process_lock);
1579 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1581 struct timespec64 now;
1582 struct ll_sb_info *sbi = seq->private;
1583 struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1584 struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1587 ktime_get_real_ts64(&now);
1589 if (!sbi->ll_rw_stats_on) {
1590 seq_printf(seq, "disabled\n"
1591 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1594 spin_lock(&sbi->ll_process_lock);
1596 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1597 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1598 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1599 "R/W", "PID", "RANGE START", "RANGE END",
1600 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1601 /* We stored the discontiguous offsets here; print them first */
1602 for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1603 if (offset[i].rw_pid != 0)
1605 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1606 offset[i].rw_op == READ ? 'R' : 'W',
1608 offset[i].rw_range_start,
1609 offset[i].rw_range_end,
1610 (unsigned long)offset[i].rw_smallest_extent,
1611 (unsigned long)offset[i].rw_largest_extent,
1612 offset[i].rw_offset);
1614 /* Then print the current offsets for each process */
1615 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1616 if (process[i].rw_pid != 0)
1618 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1619 process[i].rw_op == READ ? 'R' : 'W',
1621 process[i].rw_range_start,
1622 process[i].rw_last_file_pos,
1623 (unsigned long)process[i].rw_smallest_extent,
1624 (unsigned long)process[i].rw_largest_extent,
1625 process[i].rw_offset);
1627 spin_unlock(&sbi->ll_process_lock);
1632 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1633 const char __user *buf,
1634 size_t len, loff_t *off)
1636 struct seq_file *seq = file->private_data;
1637 struct ll_sb_info *sbi = seq->private;
1638 struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1639 struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1640 int value = 1, rc = 0;
1645 rc = lprocfs_write_helper(buf, len, &value);
1647 if (rc < 0 && len < 16) {
1650 if (copy_from_user(kernbuf, buf, len))
1654 if (kernbuf[len - 1] == '\n')
1655 kernbuf[len - 1] = 0;
1657 if (strcmp(kernbuf, "disabled") == 0 ||
1658 strcmp(kernbuf, "Disabled") == 0)
1663 sbi->ll_rw_stats_on = 0;
1665 sbi->ll_rw_stats_on = 1;
1667 spin_lock(&sbi->ll_process_lock);
1668 sbi->ll_offset_process_count = 0;
1669 sbi->ll_rw_offset_entry_count = 0;
1670 memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1671 LL_PROCESS_HIST_MAX);
1672 memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1673 LL_OFFSET_HIST_MAX);
1674 spin_unlock(&sbi->ll_process_lock);
1679 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1681 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
1683 lvars->obd_vars = lprocfs_llite_obd_vars;