LCOV - code coverage report
Current view: top level - drivers/base - cacheinfo.c
Test: landlock.info
Date: 2021-04-22 12:43:58

Coverage:               Hit    Total    Coverage
    Lines:               81      213      38.0 %
    Functions:           10       29      34.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * cacheinfo support - processor cache information via sysfs
       4             :  *
       5             :  * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
       6             :  * Author: Sudeep Holla <sudeep.holla@arm.com>
       7             :  */
       8             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
       9             : 
      10             : #include <linux/acpi.h>
      11             : #include <linux/bitops.h>
      12             : #include <linux/cacheinfo.h>
      13             : #include <linux/compiler.h>
      14             : #include <linux/cpu.h>
      15             : #include <linux/device.h>
      16             : #include <linux/init.h>
      17             : #include <linux/of.h>
      18             : #include <linux/sched.h>
      19             : #include <linux/slab.h>
      20             : #include <linux/smp.h>
      21             : #include <linux/sysfs.h>
      22             : 
      23             : /* pointer to per cpu cacheinfo */
      24             : static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
      25             : #define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
      26             : #define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
      27             : #define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)
      28             : 
      29          32 : struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
      30             : {
      31          32 :         return ci_cacheinfo(cpu);
      32             : }
      33             : 
      34             : #ifdef CONFIG_OF
      35             : static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
      36             :                                            struct cacheinfo *sib_leaf)
      37             : {
      38             :         return sib_leaf->fw_token == this_leaf->fw_token;
      39             : }
      40             : 
      41             : /* OF properties to query for a given cache type */
      42             : struct cache_type_info {
      43             :         const char *size_prop;
      44             :         const char *line_size_props[2];
      45             :         const char *nr_sets_prop;
      46             : };
      47             : 
      48             : static const struct cache_type_info cache_type_info[] = {
      49             :         {
      50             :                 .size_prop       = "cache-size",
      51             :                 .line_size_props = { "cache-line-size",
      52             :                                      "cache-block-size", },
      53             :                 .nr_sets_prop    = "cache-sets",
      54             :         }, {
      55             :                 .size_prop       = "i-cache-size",
      56             :                 .line_size_props = { "i-cache-line-size",
      57             :                                      "i-cache-block-size", },
      58             :                 .nr_sets_prop    = "i-cache-sets",
      59             :         }, {
      60             :                 .size_prop       = "d-cache-size",
      61             :                 .line_size_props = { "d-cache-line-size",
      62             :                                      "d-cache-block-size", },
      63             :                 .nr_sets_prop    = "d-cache-sets",
      64             :         },
      65             : };
      66             : 
      67             : static inline int get_cacheinfo_idx(enum cache_type type)
      68             : {
      69             :         if (type == CACHE_TYPE_UNIFIED)
      70             :                 return 0;
      71             :         return type;
      72             : }
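                      : /*
                      :  * Example (illustrative): with the cache_type values from
                      :  * include/linux/cacheinfo.h, CACHE_TYPE_INST (BIT(0) == 1) indexes
                      :  * the "i-cache-*" entry and CACHE_TYPE_DATA (BIT(1) == 2) the
                      :  * "d-cache-*" entry of the table above, while CACHE_TYPE_UNIFIED
                      :  * (BIT(2)) is remapped to slot 0 so it picks up the plain "cache-*"
                      :  * properties. A hypothetical DT cache node might carry:
                      :  *
                      :  *     d-cache-size = <0x8000>;
                      :  *     d-cache-line-size = <64>;
                      :  *     d-cache-sets = <128>;
                      :  */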
      73             : 
      74             : static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
      75             : {
      76             :         const char *propname;
      77             :         int ct_idx;
      78             : 
      79             :         ct_idx = get_cacheinfo_idx(this_leaf->type);
      80             :         propname = cache_type_info[ct_idx].size_prop;
      81             : 
      82             :         of_property_read_u32(np, propname, &this_leaf->size);
      83             : }
      84             : 
      85             : /* not cache_line_size() because that's a macro in include/linux/cache.h */
      86             : static void cache_get_line_size(struct cacheinfo *this_leaf,
      87             :                                 struct device_node *np)
      88             : {
      89             :         int i, lim, ct_idx;
      90             : 
      91             :         ct_idx = get_cacheinfo_idx(this_leaf->type);
      92             :         lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
      93             : 
      94             :         for (i = 0; i < lim; i++) {
      95             :                 int ret;
      96             :                 u32 line_size;
      97             :                 const char *propname;
      98             : 
      99             :                 propname = cache_type_info[ct_idx].line_size_props[i];
     100             :                 ret = of_property_read_u32(np, propname, &line_size);
     101             :                 if (!ret) {
     102             :                         this_leaf->coherency_line_size = line_size;
     103             :                         break;
     104             :                 }
     105             :         }
     106             : }
     107             : 
     108             : static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
     109             : {
     110             :         const char *propname;
     111             :         int ct_idx;
     112             : 
     113             :         ct_idx = get_cacheinfo_idx(this_leaf->type);
     114             :         propname = cache_type_info[ct_idx].nr_sets_prop;
     115             : 
     116             :         of_property_read_u32(np, propname, &this_leaf->number_of_sets);
     117             : }
     118             : 
     119             : static void cache_associativity(struct cacheinfo *this_leaf)
     120             : {
     121             :         unsigned int line_size = this_leaf->coherency_line_size;
     122             :         unsigned int nr_sets = this_leaf->number_of_sets;
     123             :         unsigned int size = this_leaf->size;
     124             : 
     125             :         /*
     126             :          * If the cache is fully associative, there is no need to
     127             :          * check the other properties.
     128             :          */
      129             :         if (nr_sets > 1 && size > 0 && line_size > 0)
     130             :                 this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
     131             : }
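                      : /*
                      :  * Worked example (hypothetical numbers): a 32 KiB cache with
                      :  * 64-byte lines and 128 sets gives (32768 / 128) / 64 = 4 ways.
                      :  * For a fully associative cache (nr_sets == 1) the field is left
                      :  * at 0, which the "allow 0 = full associativity" check in
                      :  * cache_default_attrs_is_visible() below still exposes in sysfs.
                      :  */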
     132             : 
     133             : static bool cache_node_is_unified(struct cacheinfo *this_leaf,
     134             :                                   struct device_node *np)
     135             : {
     136             :         return of_property_read_bool(np, "cache-unified");
     137             : }
     138             : 
     139             : static void cache_of_set_props(struct cacheinfo *this_leaf,
     140             :                                struct device_node *np)
     141             : {
     142             :         /*
      143             :          * init_cache_level must set up the cache level correctly,
      144             :          * overriding the architecturally specified levels, so if
      145             :          * the type is still NOCACHE at this stage, it should be unified
     146             :          */
     147             :         if (this_leaf->type == CACHE_TYPE_NOCACHE &&
     148             :             cache_node_is_unified(this_leaf, np))
     149             :                 this_leaf->type = CACHE_TYPE_UNIFIED;
     150             :         cache_size(this_leaf, np);
     151             :         cache_get_line_size(this_leaf, np);
     152             :         cache_nr_sets(this_leaf, np);
     153             :         cache_associativity(this_leaf);
     154             : }
     155             : 
     156             : static int cache_setup_of_node(unsigned int cpu)
     157             : {
     158             :         struct device_node *np;
     159             :         struct cacheinfo *this_leaf;
     160             :         struct device *cpu_dev = get_cpu_device(cpu);
     161             :         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     162             :         unsigned int index = 0;
     163             : 
     164             :         /* skip if fw_token is already populated */
      165             :         if (this_cpu_ci->info_list->fw_token)
      166             :                 return 0;
      167             : 
     168             : 
     169             :         if (!cpu_dev) {
     170             :                 pr_err("No cpu device for CPU %d\n", cpu);
     171             :                 return -ENODEV;
     172             :         }
     173             :         np = cpu_dev->of_node;
     174             :         if (!np) {
     175             :                 pr_err("Failed to find cpu%d device node\n", cpu);
     176             :                 return -ENOENT;
     177             :         }
     178             : 
     179             :         while (index < cache_leaves(cpu)) {
     180             :                 this_leaf = this_cpu_ci->info_list + index;
     181             :                 if (this_leaf->level != 1)
     182             :                         np = of_find_next_cache_node(np);
     183             :                 else
      184             :                         np = of_node_get(np); /* cpu node itself */
     185             :                 if (!np)
     186             :                         break;
     187             :                 cache_of_set_props(this_leaf, np);
     188             :                 this_leaf->fw_token = np;
     189             :                 index++;
     190             :         }
     191             : 
     192             :         if (index != cache_leaves(cpu)) /* not all OF nodes populated */
     193             :                 return -ENOENT;
     194             : 
     195             :         return 0;
     196             : }
     197             : #else
     198             : static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
     199           0 : static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
     200             :                                            struct cacheinfo *sib_leaf)
     201             : {
     202             :         /*
      203             :          * For non-DT/ACPI systems, assume per-CPU level 1 caches and
      204             :          * system-wide shared caches for all other levels. This is used
      205             :          * only if arch-specific code has not populated shared_cpu_map.
     206             :          */
     207           0 :         return !(this_leaf->level == 1);
     208             : }
     209             : #endif
     210             : 
     211           0 : int __weak cache_setup_acpi(unsigned int cpu)
     212             : {
     213           0 :         return -ENOTSUPP;
     214             : }
     215             : 
     216             : unsigned int coherency_max_size;
     217             : 
     218           4 : static int cache_shared_cpu_map_setup(unsigned int cpu)
     219             : {
     220           4 :         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     221           4 :         struct cacheinfo *this_leaf, *sib_leaf;
     222           4 :         unsigned int index;
     223           4 :         int ret = 0;
     224             : 
     225           4 :         if (this_cpu_ci->cpu_map_populated)
     226             :                 return 0;
     227             : 
     228             :         if (of_have_populated_dt())
     229             :                 ret = cache_setup_of_node(cpu);
     230             :         else if (!acpi_disabled)
     231             :                 ret = cache_setup_acpi(cpu);
     232             : 
     233             :         if (ret)
     234             :                 return ret;
     235             : 
     236           0 :         for (index = 0; index < cache_leaves(cpu); index++) {
     237           0 :                 unsigned int i;
     238             : 
     239           0 :                 this_leaf = this_cpu_ci->info_list + index;
     240             :                 /* skip if shared_cpu_map is already populated */
     241           0 :                 if (!cpumask_empty(&this_leaf->shared_cpu_map))
     242           0 :                         continue;
     243             : 
     244           0 :                 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
     245           0 :                 for_each_online_cpu(i) {
     246           0 :                         struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
     247             : 
     248           0 :                         if (i == cpu || !sib_cpu_ci->info_list)
      249           0 :                         continue; /* skip itself or CPUs without cacheinfo */
     250           0 :                         sib_leaf = sib_cpu_ci->info_list + index;
     251           0 :                         if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
     252           0 :                                 cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
     253           0 :                                 cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
     254             :                         }
     255             :                 }
     256             :                 /* record the maximum cache line size */
     257           0 :                 if (this_leaf->coherency_line_size > coherency_max_size)
     258           0 :                         coherency_max_size = this_leaf->coherency_line_size;
     259             :         }
     260             : 
     261             :         return 0;
     262             : }
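                      : /*
                      :  * Example (illustrative): if cpu0 and cpu1 carry the same fw_token
                      :  * on their index 2 (say, L2) leaves, the nested loop above sets
                      :  * cpu1's bit in cpu0's shared_cpu_map and vice versa, keeping the
                      :  * masks symmetric as CPUs come online one at a time.
                      :  */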
     263             : 
     264           0 : static void cache_shared_cpu_map_remove(unsigned int cpu)
     265             : {
     266           0 :         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     267           0 :         struct cacheinfo *this_leaf, *sib_leaf;
     268           0 :         unsigned int sibling, index;
     269             : 
     270           0 :         for (index = 0; index < cache_leaves(cpu); index++) {
     271           0 :                 this_leaf = this_cpu_ci->info_list + index;
     272           0 :                 for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
     273           0 :                         struct cpu_cacheinfo *sib_cpu_ci;
     274             : 
     275           0 :                         if (sibling == cpu) /* skip itself */
     276           0 :                                 continue;
     277             : 
     278           0 :                         sib_cpu_ci = get_cpu_cacheinfo(sibling);
     279           0 :                         if (!sib_cpu_ci->info_list)
     280           0 :                                 continue;
     281             : 
     282           0 :                         sib_leaf = sib_cpu_ci->info_list + index;
     283           0 :                         cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
     284           0 :                         cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
     285             :                 }
     286           0 :                 if (of_have_populated_dt())
     287           0 :                         of_node_put(this_leaf->fw_token);
     288             :         }
     289           0 : }
     290             : 
     291           0 : static void free_cache_attributes(unsigned int cpu)
     292             : {
     293           0 :         if (!per_cpu_cacheinfo(cpu))
     294             :                 return;
     295             : 
     296           0 :         cache_shared_cpu_map_remove(cpu);
     297             : 
     298           0 :         kfree(per_cpu_cacheinfo(cpu));
     299           0 :         per_cpu_cacheinfo(cpu) = NULL;
     300             : }
     301             : 
     302           0 : int __weak init_cache_level(unsigned int cpu)
     303             : {
     304           0 :         return -ENOENT;
     305             : }
     306             : 
     307           0 : int __weak populate_cache_leaves(unsigned int cpu)
     308             : {
     309           0 :         return -ENOENT;
     310             : }
     311             : 
     312           4 : static int detect_cache_attributes(unsigned int cpu)
     313             : {
     314           4 :         int ret;
     315             : 
     316           4 :         if (init_cache_level(cpu) || !cache_leaves(cpu))
     317             :                 return -ENOENT;
     318             : 
     319           4 :         per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
     320             :                                          sizeof(struct cacheinfo), GFP_KERNEL);
     321           4 :         if (per_cpu_cacheinfo(cpu) == NULL)
     322             :                 return -ENOMEM;
     323             : 
     324             :         /*
      325             :          * populate_cache_leaves() may completely set up the cache leaves
      326             :          * and shared_cpu_map, or it may leave them only partially set up.
     327             :          */
     328           4 :         ret = populate_cache_leaves(cpu);
     329           4 :         if (ret)
     330           0 :                 goto free_ci;
     331             :         /*
      332             :          * For systems using DT for the cache hierarchy, fw_token
      333             :          * and shared_cpu_map are set up here only if they are
      334             :          * not already populated.
     335             :          */
     336           4 :         ret = cache_shared_cpu_map_setup(cpu);
     337           4 :         if (ret) {
     338           0 :                 pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
     339           0 :                 goto free_ci;
     340             :         }
     341             : 
     342             :         return 0;
     343             : 
     344           0 : free_ci:
     345           0 :         free_cache_attributes(cpu);
     346           0 :         return ret;
     347             : }
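                      : /*
                      :  * The two __weak hooks above form the per-architecture contract
                      :  * (implemented by, e.g., arm64): init_cache_level() sets num_levels
                      :  * and num_leaves for the CPU, and populate_cache_leaves() fills the
                      :  * freshly allocated info_list with per-leaf level/type data.
                      :  */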
     348             : 
     349             : /* pointer to cpuX/cache device */
     350             : static DEFINE_PER_CPU(struct device *, ci_cache_dev);
     351             : #define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))
     352             : 
     353             : static cpumask_t cache_dev_map;
     354             : 
     355             : /* pointer to array of devices for cpuX/cache/indexY */
     356             : static DEFINE_PER_CPU(struct device **, ci_index_dev);
     357             : #define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
     358             : #define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])
     359             : 
     360             : #define show_one(file_name, object)                             \
     361             : static ssize_t file_name##_show(struct device *dev,             \
     362             :                 struct device_attribute *attr, char *buf)       \
     363             : {                                                               \
     364             :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
     365             :         return sysfs_emit(buf, "%u\n", this_leaf->object); \
     366             : }
     367             : 
     368           0 : show_one(id, id);
     369           0 : show_one(level, level);
     370           0 : show_one(coherency_line_size, coherency_line_size);
     371           0 : show_one(number_of_sets, number_of_sets);
     372           0 : show_one(physical_line_partition, physical_line_partition);
     373           0 : show_one(ways_of_associativity, ways_of_associativity);
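                      : /*
                      :  * For reference, show_one(level, level) above expands to roughly:
                      :  *
                      :  *     static ssize_t level_show(struct device *dev,
                      :  *                     struct device_attribute *attr, char *buf)
                      :  *     {
                      :  *             struct cacheinfo *this_leaf = dev_get_drvdata(dev);
                      :  *             return sysfs_emit(buf, "%u\n", this_leaf->level);
                      :  *     }
                      :  */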
     374             : 
     375           0 : static ssize_t size_show(struct device *dev,
     376             :                          struct device_attribute *attr, char *buf)
     377             : {
     378           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     379             : 
     380           0 :         return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
     381             : }
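                      : /* Example: a 32768-byte leaf is emitted as "32K" (32768 >> 10 == 32). */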
     382             : 
     383           0 : static ssize_t shared_cpu_map_show(struct device *dev,
     384             :                                    struct device_attribute *attr, char *buf)
     385             : {
     386           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     387           0 :         const struct cpumask *mask = &this_leaf->shared_cpu_map;
     388             : 
     389           0 :         return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
     390             : }
     391             : 
     392           0 : static ssize_t shared_cpu_list_show(struct device *dev,
     393             :                                     struct device_attribute *attr, char *buf)
     394             : {
     395           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     396           0 :         const struct cpumask *mask = &this_leaf->shared_cpu_map;
     397             : 
     398           0 :         return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
     399             : }
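                      : /*
                      :  * Example (illustrative): with CPUs 0-3 sharing a leaf, "%*pb"
                      :  * prints the hex bitmap "f" while "%*pbl" prints the list "0-3".
                      :  */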
     400             : 
     401           0 : static ssize_t type_show(struct device *dev,
     402             :                          struct device_attribute *attr, char *buf)
     403             : {
     404           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     405           0 :         const char *output;
     406             : 
     407           0 :         switch (this_leaf->type) {
     408             :         case CACHE_TYPE_DATA:
     409             :                 output = "Data";
     410             :                 break;
     411           0 :         case CACHE_TYPE_INST:
     412           0 :                 output = "Instruction";
     413           0 :                 break;
     414           0 :         case CACHE_TYPE_UNIFIED:
     415           0 :                 output = "Unified";
     416           0 :                 break;
     417             :         default:
     418             :                 return -EINVAL;
     419             :         }
     420             : 
     421           0 :         return sysfs_emit(buf, "%s\n", output);
     422             : }
     423             : 
     424           0 : static ssize_t allocation_policy_show(struct device *dev,
     425             :                                       struct device_attribute *attr, char *buf)
     426             : {
     427           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     428           0 :         unsigned int ci_attr = this_leaf->attributes;
     429           0 :         const char *output;
     430             : 
     431           0 :         if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
     432             :                 output = "ReadWriteAllocate";
     433           0 :         else if (ci_attr & CACHE_READ_ALLOCATE)
     434             :                 output = "ReadAllocate";
     435           0 :         else if (ci_attr & CACHE_WRITE_ALLOCATE)
     436             :                 output = "WriteAllocate";
     437             :         else
     438             :                 return 0;
     439             : 
     440           0 :         return sysfs_emit(buf, "%s\n", output);
     441             : }
     442             : 
     443           0 : static ssize_t write_policy_show(struct device *dev,
     444             :                                  struct device_attribute *attr, char *buf)
     445             : {
     446           0 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     447           0 :         unsigned int ci_attr = this_leaf->attributes;
     448           0 :         int n = 0;
     449             : 
     450           0 :         if (ci_attr & CACHE_WRITE_THROUGH)
     451           0 :                 n = sysfs_emit(buf, "WriteThrough\n");
     452           0 :         else if (ci_attr & CACHE_WRITE_BACK)
     453           0 :                 n = sysfs_emit(buf, "WriteBack\n");
     454           0 :         return n;
     455             : }
     456             : 
     457             : static DEVICE_ATTR_RO(id);
     458             : static DEVICE_ATTR_RO(level);
     459             : static DEVICE_ATTR_RO(type);
     460             : static DEVICE_ATTR_RO(coherency_line_size);
     461             : static DEVICE_ATTR_RO(ways_of_associativity);
     462             : static DEVICE_ATTR_RO(number_of_sets);
     463             : static DEVICE_ATTR_RO(size);
     464             : static DEVICE_ATTR_RO(allocation_policy);
     465             : static DEVICE_ATTR_RO(write_policy);
     466             : static DEVICE_ATTR_RO(shared_cpu_map);
     467             : static DEVICE_ATTR_RO(shared_cpu_list);
     468             : static DEVICE_ATTR_RO(physical_line_partition);
     469             : 
     470             : static struct attribute *cache_default_attrs[] = {
     471             :         &dev_attr_id.attr,
     472             :         &dev_attr_type.attr,
     473             :         &dev_attr_level.attr,
     474             :         &dev_attr_shared_cpu_map.attr,
     475             :         &dev_attr_shared_cpu_list.attr,
     476             :         &dev_attr_coherency_line_size.attr,
     477             :         &dev_attr_ways_of_associativity.attr,
     478             :         &dev_attr_number_of_sets.attr,
     479             :         &dev_attr_size.attr,
     480             :         &dev_attr_allocation_policy.attr,
     481             :         &dev_attr_write_policy.attr,
     482             :         &dev_attr_physical_line_partition.attr,
     483             :         NULL
     484             : };
     485             : 
     486             : static umode_t
     487         192 : cache_default_attrs_is_visible(struct kobject *kobj,
     488             :                                struct attribute *attr, int unused)
     489             : {
     490         192 :         struct device *dev = kobj_to_dev(kobj);
     491         192 :         struct cacheinfo *this_leaf = dev_get_drvdata(dev);
     492         192 :         const struct cpumask *mask = &this_leaf->shared_cpu_map;
     493         192 :         umode_t mode = attr->mode;
     494             : 
     495         192 :         if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
     496             :                 return mode;
     497         176 :         if ((attr == &dev_attr_type.attr) && this_leaf->type)
     498             :                 return mode;
     499         160 :         if ((attr == &dev_attr_level.attr) && this_leaf->level)
     500             :                 return mode;
     501         144 :         if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
     502             :                 return mode;
     503         128 :         if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
     504             :                 return mode;
     505         112 :         if ((attr == &dev_attr_coherency_line_size.attr) &&
     506          16 :             this_leaf->coherency_line_size)
     507             :                 return mode;
     508          96 :         if ((attr == &dev_attr_ways_of_associativity.attr) &&
     509          16 :             this_leaf->size) /* allow 0 = full associativity */
     510             :                 return mode;
     511          80 :         if ((attr == &dev_attr_number_of_sets.attr) &&
     512          16 :             this_leaf->number_of_sets)
     513             :                 return mode;
     514          64 :         if ((attr == &dev_attr_size.attr) && this_leaf->size)
     515             :                 return mode;
     516          48 :         if ((attr == &dev_attr_write_policy.attr) &&
     517          16 :             (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
     518             :                 return mode;
     519          48 :         if ((attr == &dev_attr_allocation_policy.attr) &&
     520          16 :             (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
     521             :                 return mode;
     522          48 :         if ((attr == &dev_attr_physical_line_partition.attr) &&
     523          16 :             this_leaf->physical_line_partition)
     524          16 :                 return mode;
     525             : 
     526             :         return 0;
     527             : }
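                      : /*
                      :  * Example (illustrative): a leaf for which firmware supplied only
                      :  * level and type exposes just those files plus the shared_cpu_*
                      :  * masks; attributes left at zero above stay hidden in sysfs.
                      :  */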
     528             : 
     529             : static const struct attribute_group cache_default_group = {
     530             :         .attrs = cache_default_attrs,
     531             :         .is_visible = cache_default_attrs_is_visible,
     532             : };
     533             : 
     534             : static const struct attribute_group *cache_default_groups[] = {
     535             :         &cache_default_group,
     536             :         NULL,
     537             : };
     538             : 
     539             : static const struct attribute_group *cache_private_groups[] = {
     540             :         &cache_default_group,
      541             :         NULL, /* placeholder for the private group */
     542             :         NULL,
     543             : };
     544             : 
     545             : const struct attribute_group *
     546          16 : __weak cache_get_priv_group(struct cacheinfo *this_leaf)
     547             : {
     548          16 :         return NULL;
     549             : }
     550             : 
     551             : static const struct attribute_group **
     552          16 : cache_get_attribute_groups(struct cacheinfo *this_leaf)
     553             : {
     554          16 :         const struct attribute_group *priv_group =
     555          16 :                         cache_get_priv_group(this_leaf);
     556             : 
     557          16 :         if (!priv_group)
     558             :                 return cache_default_groups;
     559             : 
     560           0 :         if (!cache_private_groups[1])
     561           0 :                 cache_private_groups[1] = priv_group;
     562             : 
     563             :         return cache_private_groups;
     564             : }
     565             : 
     566             : /* Add/Remove cache interface for CPU device */
     567           0 : static void cpu_cache_sysfs_exit(unsigned int cpu)
     568             : {
     569           0 :         int i;
     570           0 :         struct device *ci_dev;
     571             : 
     572           0 :         if (per_cpu_index_dev(cpu)) {
     573           0 :                 for (i = 0; i < cache_leaves(cpu); i++) {
     574           0 :                         ci_dev = per_cache_index_dev(cpu, i);
     575           0 :                         if (!ci_dev)
     576           0 :                                 continue;
     577           0 :                         device_unregister(ci_dev);
     578             :                 }
     579           0 :                 kfree(per_cpu_index_dev(cpu));
     580           0 :                 per_cpu_index_dev(cpu) = NULL;
     581             :         }
     582           0 :         device_unregister(per_cpu_cache_dev(cpu));
     583           0 :         per_cpu_cache_dev(cpu) = NULL;
     584           0 : }
     585             : 
     586           4 : static int cpu_cache_sysfs_init(unsigned int cpu)
     587             : {
     588           4 :         struct device *dev = get_cpu_device(cpu);
     589             : 
     590           4 :         if (per_cpu_cacheinfo(cpu) == NULL)
     591             :                 return -ENOENT;
     592             : 
     593           4 :         per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
     594           4 :         if (IS_ERR(per_cpu_cache_dev(cpu)))
     595           0 :                 return PTR_ERR(per_cpu_cache_dev(cpu));
     596             : 
     597             :         /* Allocate all required memory */
     598           4 :         per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
     599             :                                          sizeof(struct device *), GFP_KERNEL);
     600           4 :         if (unlikely(per_cpu_index_dev(cpu) == NULL))
     601           0 :                 goto err_out;
     602             : 
     603             :         return 0;
     604             : 
     605           0 : err_out:
     606           0 :         cpu_cache_sysfs_exit(cpu);
     607           0 :         return -ENOMEM;
     608             : }
     609             : 
     610           4 : static int cache_add_dev(unsigned int cpu)
     611             : {
     612           4 :         unsigned int i;
     613           4 :         int rc;
     614           4 :         struct device *ci_dev, *parent;
     615           4 :         struct cacheinfo *this_leaf;
     616           4 :         struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     617           4 :         const struct attribute_group **cache_groups;
     618             : 
     619           4 :         rc = cpu_cache_sysfs_init(cpu);
     620           4 :         if (unlikely(rc < 0))
     621             :                 return rc;
     622             : 
     623           4 :         parent = per_cpu_cache_dev(cpu);
     624          20 :         for (i = 0; i < cache_leaves(cpu); i++) {
     625          16 :                 this_leaf = this_cpu_ci->info_list + i;
     626          16 :                 if (this_leaf->disable_sysfs)
     627           0 :                         continue;
     628          16 :                 if (this_leaf->type == CACHE_TYPE_NOCACHE)
     629             :                         break;
     630          16 :                 cache_groups = cache_get_attribute_groups(this_leaf);
     631          16 :                 ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
     632             :                                            "index%1u", i);
     633          16 :                 if (IS_ERR(ci_dev)) {
     634           0 :                         rc = PTR_ERR(ci_dev);
     635           0 :                         goto err;
     636             :                 }
     637          16 :                 per_cache_index_dev(cpu, i) = ci_dev;
     638             :         }
     639           4 :         cpumask_set_cpu(cpu, &cache_dev_map);
     640             : 
     641           4 :         return 0;
     642           0 : err:
     643           0 :         cpu_cache_sysfs_exit(cpu);
     644           0 :         return rc;
     645             : }
     646             : 
     647           4 : static int cacheinfo_cpu_online(unsigned int cpu)
     648             : {
     649           4 :         int rc = detect_cache_attributes(cpu);
     650             : 
     651           4 :         if (rc)
     652             :                 return rc;
     653           4 :         rc = cache_add_dev(cpu);
     654           4 :         if (rc)
     655           0 :                 free_cache_attributes(cpu);
     656             :         return rc;
     657             : }
     658             : 
     659           0 : static int cacheinfo_cpu_pre_down(unsigned int cpu)
     660             : {
     661           0 :         if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
     662           0 :                 cpu_cache_sysfs_exit(cpu);
     663             : 
     664           0 :         free_cache_attributes(cpu);
     665           0 :         return 0;
     666             : }
     667             : 
     668           1 : static int __init cacheinfo_sysfs_init(void)
     669             : {
     670           1 :         return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
     671             :                                  "base/cacheinfo:online",
     672             :                                  cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
     673             : }
     674             : device_initcall(cacheinfo_sysfs_init);
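                      : /*
                      :  * Net effect (illustrative paths): once the hotplug callback has
                      :  * run for each online CPU, userspace sees entries such as:
                      :  *
                      :  *     /sys/devices/system/cpu/cpu0/cache/index0/level
                      :  *     /sys/devices/system/cpu/cpu0/cache/index0/type
                      :  *     /sys/devices/system/cpu/cpu0/cache/index1/shared_cpu_list
                      :  */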

Generated by: LCOV version 1.14