/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/fsl/mc.h>

#define NO_IOMMU	1

static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);

/**
 * of_get_dma_window - Parse *dma-window property and return 0 if found.
 *
 * @dn: device node
 * @prefix: prefix for property name if any
 * @index: index to start to parse
 * @busno: Returns busno if supported. Otherwise pass NULL
 * @addr: Returns address that DMA starts
 * @size: Returns the range that DMA can handle
 *
 * This supports different formats flexibly. @prefix can be
 * configured if needed. @busno and @index are optional;
 * set them to 0 (or NULL) if not used.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
		      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
	const __be32 *dma_window, *end;
	int bytes, cur_index = 0;
	char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

	if (!dn || !addr || !size)
		return -EINVAL;

	if (!prefix)
		prefix = "";

	snprintf(propname, sizeof(propname), "%sdma-window", prefix);
	snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
	snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

	dma_window = of_get_property(dn, propname, &bytes);
	if (!dma_window)
		return -ENODEV;
	end = dma_window + bytes / sizeof(*dma_window);

	while (dma_window < end) {
		u32 cells;
		const void *prop;

		/* busno is one cell if supported */
		if (busno)
			*busno = be32_to_cpup(dma_window++);

		prop = of_get_property(dn, addrname, NULL);
		if (!prop)
			prop = of_get_property(dn, "#address-cells", NULL);

		cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
		if (!cells)
			return -EINVAL;
		*addr = of_read_number(dma_window, cells);
		dma_window += cells;

		prop = of_get_property(dn, sizename, NULL);
		cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
		if (!cells)
			return -EINVAL;
		*size = of_read_number(dma_window, cells);
		dma_window += cells;

		if (cur_index++ == index)
			break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
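/*
 * Example (illustrative only, not taken from a real binding): with the
 * default (empty) prefix and one cell each for address and size, a
 * hypothetical node such as
 *
 *	devbus {
 *		#dma-address-cells = <1>;
 *		#dma-size-cells = <1>;
 *		dma-window = <1 0x80000000 0x10000000>;
 *	};
 *
 * parsed with of_get_dma_window(dn, NULL, 0, &busno, &addr, &size) yields
 * busno = 1, addr = 0x80000000 and size = 0x10000000; the leading busno
 * cell is consumed only because a non-NULL @busno pointer was passed.
 */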
static bool of_iommu_driver_present(struct device_node *np)
{
	/*
	 * If the IOMMU still isn't ready by the time we reach init, assume
	 * it never will be. We don't want to defer indefinitely, nor attempt
	 * to dereference __iommu_of_table after it's been freed.
	 */
	if (system_state >= SYSTEM_RUNNING)
		return false;

	return of_match_node(&__iommu_of_table, np);
}

static int of_iommu_xlate(struct device *dev,
			  struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int err;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    !of_device_is_available(iommu_spec->np) ||
	    (!ops && !of_iommu_driver_present(iommu_spec->np)))
		return NO_IOMMU;

	err = iommu_fwspec_init(dev, fwnode, ops);
	if (err)
		return err;
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return -EPROBE_DEFER;

	return ops->of_xlate(dev, iommu_spec);
}
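/*
 * Example (illustrative only): for a master described as
 *
 *	master {
 *		iommus = <&smmu 0x100>;
 *	};
 *
 * of_iommu_xlate() is handed a struct of_phandle_args with np pointing at
 * the (hypothetical) &smmu node and args[0] = 0x100; it records the
 * specifier in the device's iommu_fwspec and forwards it to the matched
 * driver's ->of_xlate() callback.
 */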
struct of_pci_iommu_alias_info {
	struct device *dev;
	struct device_node *np;
};

/**
 * of_map_rid - Translate a requester ID through a downstream mapping.
 * @np: root complex device node.
 * @rid: Requester ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a PCI/MC requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_rid(struct device_node *np, u32 rid,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np, map_name,
			map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name, map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			/* Keep searching if this entry targets another node */
			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}
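/*
 * Example (illustrative only; &smmu_a and &smmu_b are hypothetical): a
 * root complex carrying
 *
 *	iommu-map = <0x0000 &smmu_a 0x0000 0x8000>,
 *		    <0x8000 &smmu_b 0x0000 0x8000>;
 *
 * and no "iommu-map-mask" leaves map_mask at 0xffffffff, so rid 0x8010
 * misses the first entry but matches the second, and of_map_rid() returns
 * *id_out = 0x8010 - 0x8000 + 0x0000 = 0x0010 with *target set to the
 * &smmu_b node (reference held).
 */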
static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_pci_iommu_alias_info *info = data;
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
			 &iommu_spec.np, iommu_spec.args);
	if (err)
		return err == -ENODEV ? NO_IOMMU : err;

	err = of_iommu_xlate(info->dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}

static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
				struct device_node *master_np)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };
	int err;

	err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
			 "iommu-map-mask", &iommu_spec.np, iommu_spec.args);
	if (err)
		return err == -ENODEV ? NO_IOMMU : err;

	err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
	of_node_put(iommu_spec.np);
	return err;
}

const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int err = NO_IOMMU;

	if (!master_np)
		return NULL;

	if (fwspec) {
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	if (dev_is_pci(dev)) {
		struct of_pci_iommu_alias_info info = {
			.dev = dev,
			.np = master_np,
		};

		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     of_pci_iommu_init, &info);
	} else if (dev_is_fsl_mc(dev)) {
		err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
	} else {
		struct of_phandle_args iommu_spec;
		int idx = 0;

		while (!of_parse_phandle_with_args(master_np, "iommus",
						   "#iommu-cells",
						   idx, &iommu_spec)) {
			err = of_iommu_xlate(dev, &iommu_spec);
			of_node_put(iommu_spec.np);
			idx++;
			if (err)
				break;
		}
	}

	/*
	 * Two success conditions can be represented by non-negative err here:
	 * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
	 *  0 : we found an IOMMU, and dev->iommu_fwspec is initialised
	 *      appropriately
	 * <0 : any actual error
	 */
	if (!err)
		ops = dev->iommu_fwspec->ops;
	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (ops && ops->add_device && dev->bus && !dev->iommu_group)
		err = ops->add_device(dev);

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err < 0) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

static int __init of_iommu_init(void)
{
	struct device_node *np;
	const struct of_device_id *match, *matches = &__iommu_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		const of_iommu_init_fn init_fn = match->data;

		if (init_fn && init_fn(np))
			pr_err("Failed to initialise IOMMU %pOF\n", np);
	}

	return 0;
}
postcore_initcall_sync(of_iommu_init);
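/*
 * Note: __iommu_of_table is populated at build time by IOMMU_OF_DECLARE()
 * entries in individual drivers, e.g. (the exact macro signature varies
 * between kernel versions, so treat this as a sketch):
 *
 *	IOMMU_OF_DECLARE(my_iommu, "vendor,my-iommu");
 *
 * Each entry contributes a struct of_device_id whose ->data may carry an
 * of_iommu_init_fn for of_iommu_init() above to invoke; with a NULL ->data
 * the entry still lets of_iommu_driver_present() report that a driver for
 * the node is expected.
 */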