LCOV - code coverage report
Current view: top level - net/core - flow_offload.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 0 210 0.0 %
Date: 2021-04-22 12:43:58 Functions: 0 39 0.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #include <linux/kernel.h>
       3             : #include <linux/slab.h>
       4             : #include <net/flow_offload.h>
       5             : #include <linux/rtnetlink.h>
       6             : #include <linux/mutex.h>
       7             : #include <linux/rhashtable.h>
       8             : 
       9           0 : struct flow_rule *flow_rule_alloc(unsigned int num_actions)
      10             : {
      11           0 :         struct flow_rule *rule;
      12           0 :         int i;
      13             : 
      14           0 :         rule = kzalloc(struct_size(rule, action.entries, num_actions),
      15             :                        GFP_KERNEL);
      16           0 :         if (!rule)
      17             :                 return NULL;
      18             : 
      19           0 :         rule->action.num_entries = num_actions;
      20             :         /* Pre-fill each action hw_stats with DONT_CARE.
      21             :          * Caller can override this if it wants stats for a given action.
      22             :          */
      23           0 :         for (i = 0; i < num_actions; i++)
      24           0 :                 rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
      25             : 
      26             :         return rule;
      27             : }
      28             : EXPORT_SYMBOL(flow_rule_alloc);
      29             : 
/* Expand to the body of a flow_rule_match_*() helper: look up the dissector
 * target for @__type in @__rule's match and fill @__out's key/mask pointers.
 * Deliberately not wrapped in do { } while (0) - it declares locals and is
 * only ever used as the entire body of the wrapper functions below.
 */
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

/* Point @out's key/mask at @rule's META match data. */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);
      43             : 
/* Point @out's key/mask at @rule's BASIC match data. */
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
      50             : 
/* Point @out's key/mask at @rule's CONTROL match data. */
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);
      57             : 
/* Point @out's key/mask at @rule's Ethernet address match data. */
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);
      64             : 
/* Point @out's key/mask at @rule's (outer) VLAN match data. */
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);
      71             : 
/* Point @out's key/mask at @rule's customer (inner) VLAN match data. */
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);
      78             : 
/* Point @out's key/mask at @rule's IPv4 address match data. */
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);
      85             : 
/* Point @out's key/mask at @rule's IPv6 address match data. */
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);
      92             : 
/* Point @out's key/mask at @rule's IP header (tos/ttl) match data. */
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);
      99             : 
/* Point @out's key/mask at @rule's transport port match data. */
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);
     106             : 
/* Point @out's key/mask at @rule's TCP flags match data. */
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);
     113             : 
/* Point @out's key/mask at @rule's ICMP match data. */
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);
     120             : 
/* Point @out's key/mask at @rule's MPLS match data. */
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);
     127             : 
/* Point @out's key/mask at @rule's tunnel (encap) CONTROL match data. */
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);
     134             : 
/* Point @out's key/mask at @rule's tunnel IPv4 address match data. */
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);
     141             : 
/* Point @out's key/mask at @rule's tunnel IPv6 address match data. */
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);
     148             : 
/* Point @out's key/mask at @rule's tunnel IP header match data. */
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);
     155             : 
/* Point @out's key/mask at @rule's tunnel transport port match data. */
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);
     162             : 
/* Point @out's key/mask at @rule's tunnel key-id (e.g. VNI) match data. */
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);
     169             : 
/* Point @out's key/mask at @rule's tunnel options match data. */
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
     176             : 
     177           0 : struct flow_action_cookie *flow_action_cookie_create(void *data,
     178             :                                                      unsigned int len,
     179             :                                                      gfp_t gfp)
     180             : {
     181           0 :         struct flow_action_cookie *cookie;
     182             : 
     183           0 :         cookie = kmalloc(sizeof(*cookie) + len, gfp);
     184           0 :         if (!cookie)
     185             :                 return NULL;
     186           0 :         cookie->cookie_len = len;
     187           0 :         memcpy(cookie->cookie, data, len);
     188           0 :         return cookie;
     189             : }
     190             : EXPORT_SYMBOL(flow_action_cookie_create);
     191             : 
/* Free a cookie allocated by flow_action_cookie_create(); NULL is a no-op. */
void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
     197             : 
/* Point @out's key/mask at @rule's conntrack match data. */
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);
     204             : 
     205           0 : struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
     206             :                                           void *cb_ident, void *cb_priv,
     207             :                                           void (*release)(void *cb_priv))
     208             : {
     209           0 :         struct flow_block_cb *block_cb;
     210             : 
     211           0 :         block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
     212           0 :         if (!block_cb)
     213           0 :                 return ERR_PTR(-ENOMEM);
     214             : 
     215           0 :         block_cb->cb = cb;
     216           0 :         block_cb->cb_ident = cb_ident;
     217           0 :         block_cb->cb_priv = cb_priv;
     218           0 :         block_cb->release = release;
     219             : 
     220           0 :         return block_cb;
     221             : }
     222             : EXPORT_SYMBOL(flow_block_cb_alloc);
     223             : 
     224           0 : void flow_block_cb_free(struct flow_block_cb *block_cb)
     225             : {
     226           0 :         if (block_cb->release)
     227           0 :                 block_cb->release(block_cb->cb_priv);
     228             : 
     229           0 :         kfree(block_cb);
     230           0 : }
     231             : EXPORT_SYMBOL(flow_block_cb_free);
     232             : 
     233           0 : struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
     234             :                                            flow_setup_cb_t *cb, void *cb_ident)
     235             : {
     236           0 :         struct flow_block_cb *block_cb;
     237             : 
     238           0 :         list_for_each_entry(block_cb, &block->cb_list, list) {
     239           0 :                 if (block_cb->cb == cb &&
     240           0 :                     block_cb->cb_ident == cb_ident)
     241           0 :                         return block_cb;
     242             :         }
     243             : 
     244             :         return NULL;
     245             : }
     246             : EXPORT_SYMBOL(flow_block_cb_lookup);
     247             : 
/* Accessor for the private data stored in a block callback entry. */
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);
     253             : 
/* Take an additional reference on @block_cb.
 * NOTE(review): plain increment, not atomic - presumably serialized by the
 * caller (e.g. RTNL); confirm against call sites.
 */
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);
     259             : 
/* Drop a reference on @block_cb and return the remaining count, so the
 * caller can free the entry when this hits zero.  Same (non-atomic)
 * serialization requirement as flow_block_cb_incref().
 */
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
     265             : 
     266           0 : bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
     267             :                            struct list_head *driver_block_list)
     268             : {
     269           0 :         struct flow_block_cb *block_cb;
     270             : 
     271           0 :         list_for_each_entry(block_cb, driver_block_list, driver_list) {
     272           0 :                 if (block_cb->cb == cb &&
     273           0 :                     block_cb->cb_ident == cb_ident)
     274             :                         return true;
     275             :         }
     276             : 
     277             :         return false;
     278             : }
     279             : EXPORT_SYMBOL(flow_block_cb_is_busy);
     280             : 
/* flow_block_cb_setup_simple() - one-callback-per-block bind/unbind helper
 * @f:			block offload request (command, binder type, block)
 * @driver_block_list:	driver-private list used to detect duplicate binds
 * @cb:			setup callback to install
 * @cb_ident:		identity cookie matched on unbind
 * @cb_priv:		private data passed to @cb
 * @ingress_only:	if true, reject anything but clsact ingress bindings
 *
 * Convenience helper for drivers that need exactly one callback per block
 * and no release hook.  Returns 0 on success or a negative errno
 * (-EOPNOTSUPP, -EBUSY, -ENOMEM, -ENOENT).
 */
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* Reject a second bind of the same (cb, ident) pair. */
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		/* Track the entry on both the core block and driver lists. */
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
     320             : 
/* Serializes all access to the two indirect-block lists below. */
static DEFINE_MUTEX(flow_indr_block_lock);
/* All flow_block_cb entries installed through the indirect path. */
static LIST_HEAD(flow_block_indr_list);
/* Registered indirect-block drivers, one node per (cb, cb_priv) pair. */
static LIST_HEAD(flow_block_indr_dev_list);

/* A registered indirect-block driver callback.  refcnt lets the same
 * (cb, cb_priv) pair be registered multiple times and freed on last
 * unregister.
 */
struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
	struct rcu_head			rcu;
};
     332             : 
     333           0 : static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
     334             :                                                  void *cb_priv)
     335             : {
     336           0 :         struct flow_indr_dev *indr_dev;
     337             : 
     338           0 :         indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
     339           0 :         if (!indr_dev)
     340             :                 return NULL;
     341             : 
     342           0 :         indr_dev->cb         = cb;
     343           0 :         indr_dev->cb_priv    = cb_priv;
     344           0 :         refcount_set(&indr_dev->refcnt, 1);
     345             : 
     346           0 :         return indr_dev;
     347             : }
     348             : 
/* flow_indr_dev_register() - register an indirect-block driver callback
 * @cb:      callback invoked for every indirect block setup request
 * @cb_priv: driver private data passed back to @cb
 *
 * If (@cb, @cb_priv) is already registered, only its refcount is bumped.
 * Returns 0 on success or -ENOMEM.
 */
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	/* Re-registration of an existing pair just takes a reference. */
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
     375             : 
/* Move every indirect block entry owned by (@release, @cb_priv) from the
 * global list onto @cleanup_list.  Caller must hold flow_indr_block_lock;
 * the actual cleanup callbacks run later, outside the lock.
 */
static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}
     388             : 
/* Unlink each entry on @cleanup_list and run its indr.cleanup() callback.
 * Called without flow_indr_block_lock so the callbacks may sleep/re-enter
 * the offload core.
 */
static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}
     398             : 
/* flow_indr_dev_unregister() - drop a reference on a registered driver
 * @cb:      callback passed to flow_indr_dev_register()
 * @cb_priv: private data passed to flow_indr_dev_register()
 * @release: release hook identifying this driver's block entries
 *
 * When the last reference is dropped, the driver node is removed and all
 * of its outstanding indirect block entries are cleaned up.  The cleanup
 * notifications run after the lock is released.
 */
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		/* Only the final put actually unregisters the node. */
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	/* Collect this driver's block entries under the lock... */
	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	/* ...and notify them after dropping it, since cleanup may sleep. */
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
     428             : 
     429           0 : static void flow_block_indr_init(struct flow_block_cb *flow_block,
     430             :                                  struct flow_block_offload *bo,
     431             :                                  struct net_device *dev, struct Qdisc *sch, void *data,
     432             :                                  void *cb_priv,
     433             :                                  void (*cleanup)(struct flow_block_cb *block_cb))
     434             : {
     435           0 :         flow_block->indr.binder_type = bo->binder_type;
     436           0 :         flow_block->indr.data = data;
     437           0 :         flow_block->indr.cb_priv = cb_priv;
     438           0 :         flow_block->indr.dev = dev;
     439           0 :         flow_block->indr.sch = sch;
     440           0 :         flow_block->indr.cleanup = cleanup;
     441             : }
     442             : 
     443           0 : struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
     444             :                                                void *cb_ident, void *cb_priv,
     445             :                                                void (*release)(void *cb_priv),
     446             :                                                struct flow_block_offload *bo,
     447             :                                                struct net_device *dev,
     448             :                                                struct Qdisc *sch, void *data,
     449             :                                                void *indr_cb_priv,
     450             :                                                void (*cleanup)(struct flow_block_cb *block_cb))
     451             : {
     452           0 :         struct flow_block_cb *block_cb;
     453             : 
     454           0 :         block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
     455           0 :         if (IS_ERR(block_cb))
     456           0 :                 goto out;
     457             : 
     458           0 :         flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
     459           0 :         list_add(&block_cb->indr.list, &flow_block_indr_list);
     460             : 
     461           0 : out:
     462           0 :         return block_cb;
     463             : }
     464             : EXPORT_SYMBOL(flow_indr_block_cb_alloc);
     465             : 
/* flow_indr_dev_setup_offload() - offer a block setup to all indirect drivers
 * @dev:     netdevice the block request is for
 * @sch:     qdisc, if any
 * @type:    tc setup type
 * @data:    type-specific payload
 * @bo:      block offload request; drivers append their callbacks here
 * @cleanup: hook drivers must arrange for unregister-time teardown
 *
 * Every registered driver callback is invoked (under the mutex) and may
 * add entries to bo->cb_list.  Returns 0 if at least one driver bound,
 * -EOPNOTSUPP if none did.
 */
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	/* Success iff some driver actually installed a callback. */
	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

Generated by: LCOV version 1.14