Diffstat (limited to 'drivers/pci/pci-uclass.c')
-rw-r--r--  drivers/pci/pci-uclass.c | 288
1 files changed, 246 insertions, 42 deletions
diff --git a/drivers/pci/pci-uclass.c b/drivers/pci/pci-uclass.c
index 40cc9f1090..d8a6647a1d 100644
--- a/drivers/pci/pci-uclass.c
+++ b/drivers/pci/pci-uclass.c
@@ -539,7 +539,8 @@ int pci_auto_config_devices(struct udevice *bus)
int ret;
debug("%s: device %s\n", __func__, dev->name);
- if (dev_read_bool(dev, "pci,no-autoconfig"))
+ if (dev_of_valid(dev) &&
+ dev_read_bool(dev, "pci,no-autoconfig"))
continue;
ret = dm_pciauto_config_device(dev);
if (ret < 0)
@@ -620,10 +621,19 @@ int dm_pci_hose_probe_bus(struct udevice *bus)
{
int sub_bus;
int ret;
+ int ea_pos;
+ u8 reg;
debug("%s\n", __func__);
- sub_bus = pci_get_bus_max() + 1;
+ ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
+ if (ea_pos) {
+ dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
+ &reg);
+ sub_bus = reg;
+ } else {
+ sub_bus = pci_get_bus_max() + 1;
+ }
debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
dm_pciauto_prescan_setup_bridge(bus, sub_bus);
@@ -633,12 +643,15 @@ int dm_pci_hose_probe_bus(struct udevice *bus)
ret);
return ret;
}
- if (sub_bus != bus->seq) {
- printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
- __func__, bus->name, bus->seq, sub_bus);
- return -EPIPE;
+
+ if (!ea_pos) {
+ if (sub_bus != bus->seq) {
+ debug("%s: Internal error, bus '%s' got seq %d, expected %d\n",
+ __func__, bus->name, bus->seq, sub_bus);
+ return -EPIPE;
+ }
+ sub_bus = pci_get_bus_max();
}
- sub_bus = pci_get_bus_max();
dm_pciauto_postscan_setup_bridge(bus, sub_bus);
return sub_bus;
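Note on the EA read above: for a bridge that carries an Enhanced Allocation (EA) capability, the secondary and subordinate bus numbers are fixed in hardware rather than assigned during enumeration; ea_pos + sizeof(u32) + sizeof(u8) is byte 5 of the capability, the fixed subordinate bus number. A minimal sketch of the layout this assumes (the helper name is illustrative, not part of the patch):

    /*
     * Illustrative only: read a bridge's EA fixed bus numbers.
     * Per the PCIe EA capability for type-1 functions, dword 0 is the
     * capability header, byte 4 is the fixed secondary bus number and
     * byte 5 the fixed subordinate bus number.
     */
    static void ea_read_fixed_bus_numbers(struct udevice *bridge, int ea_pos,
                                          u8 *sec_bus, u8 *sub_bus)
    {
            dm_pci_read_config8(bridge, ea_pos + sizeof(u32), sec_bus);
            dm_pci_read_config8(bridge, ea_pos + sizeof(u32) + sizeof(u8),
                                sub_bus);
    }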
@@ -696,7 +709,8 @@ static int pci_find_and_bind_driver(struct udevice *parent,
find_id->vendor, find_id->device);
/* Determine optional OF node */
- pci_dev_find_ofnode(parent, bdf, &node);
+ if (ofnode_valid(dev_ofnode(parent)))
+ pci_dev_find_ofnode(parent, bdf, &node);
if (ofnode_valid(node) && !ofnode_is_available(node)) {
debug("%s: Ignoring disabled device\n", __func__);
@@ -785,6 +799,7 @@ int pci_bind_bus_devices(struct udevice *bus)
ulong header_type;
pci_dev_t bdf, end;
bool found_multi;
+ int ari_off;
int ret;
found_multi = false;
@@ -858,6 +873,31 @@ int pci_bind_bus_devices(struct udevice *bus)
pplat->vendor = vendor;
pplat->device = device;
pplat->class = class;
+
+ if (IS_ENABLED(CONFIG_PCI_ARID)) {
+ ari_off = dm_pci_find_ext_capability(dev,
+ PCI_EXT_CAP_ID_ARI);
+ if (ari_off) {
+ u16 ari_cap;
+
+ /*
+ * Read Next Function number in ARI Cap
+ * Register
+ */
+ dm_pci_read_config16(dev, ari_off + 4,
+ &ari_cap);
+ /*
+ * Jump the next scan to this function number;
+ * subtract one function (0x100) from the BDF to
+ * satisfy the loop increment.
+ */
+ if (ari_cap & 0xff00) {
+ bdf = PCI_BDF(PCI_BUS(bdf),
+ PCI_DEV(ari_cap),
+ PCI_FUNC(ari_cap));
+ bdf = bdf - 0x100;
+ }
+ }
+ }
}
return 0;
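Note on the ARI scan above: the capability register at ari_off + 4 carries the Next Function Number in its upper byte. ARI re-interprets the classic device/function split as one 8-bit function number, so packing that byte back through PCI_DEV()/PCI_FUNC() jumps the scan straight to the next implemented function, and the 0x100 subtraction compensates for the loop's own per-function increment. A small worked sketch with made-up values:

    /* Illustration only, values assumed: ARI capability register reads
     * 0x4000, i.e. next function number 0x40, no ACS bits set. */
    u16 ari_cap = 0x4000;
    u8 next_fn = (ari_cap >> 8) & 0xff;     /* bits 15:8: next function */

    if (next_fn) {
            /*
             * PCI_DEV(ari_cap) yields bits 7:3 and PCI_FUNC(ari_cap)
             * bits 2:0 of that 8-bit function number, rebuilding the
             * full devfn; back off one function (0x100) so the scan
             * loop's increment lands exactly on it.
             */
            bdf = PCI_BDF(PCI_BUS(bdf), PCI_DEV(ari_cap),
                          PCI_FUNC(ari_cap)) - 0x100;
    }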
@@ -871,8 +911,10 @@ static void decode_regions(struct pci_controller *hose, ofnode parent_node,
ofnode node)
{
int pci_addr_cells, addr_cells, size_cells;
+ struct bd_info *bd = gd->bd;
int cells_per_record;
const u32 *prop;
+ int max_regions;
int len;
int i;
@@ -892,7 +934,13 @@ static void decode_regions(struct pci_controller *hose, ofnode parent_node,
hose->region_count = 0;
debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
cells_per_record);
- for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
+
+ /* Dynamically allocate the regions array */
+ max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
+ hose->regions = (struct pci_region *)
+ calloc(1, max_regions * sizeof(struct pci_region));
+
+ for (i = 0; i < max_regions; i++, len -= cells_per_record) {
u64 pci_addr, addr, size;
int space_code;
u32 flags;
@@ -927,10 +975,13 @@ static void decode_regions(struct pci_controller *hose, ofnode parent_node,
}
pos = -1;
- for (i = 0; i < hose->region_count; i++) {
- if (hose->regions[i].flags == type)
- pos = i;
+ if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
+ for (i = 0; i < hose->region_count; i++) {
+ if (hose->regions[i].flags == type)
+ pos = i;
+ }
}
+
if (pos == -1)
pos = hose->region_count++;
debug(" - type=%d, pos=%d\n", type, pos);
@@ -938,18 +989,10 @@ static void decode_regions(struct pci_controller *hose, ofnode parent_node,
}
/* Add a region for our local memory */
-#ifdef CONFIG_NR_DRAM_BANKS
- struct bd_info *bd = gd->bd;
-
if (!bd)
return;
for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
- if (hose->region_count == MAX_PCI_REGIONS) {
- pr_err("maximum number of regions parsed, aborting\n");
- break;
- }
-
if (bd->bi_dram[i].size) {
pci_set_region(hose->regions + hose->region_count++,
bd->bi_dram[i].start,
@@ -958,19 +1001,6 @@ static void decode_regions(struct pci_controller *hose, ofnode parent_node,
PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
}
}
-#else
- phys_addr_t base = 0, size;
-
- size = gd->ram_size;
-#ifdef CONFIG_SYS_SDRAM_BASE
- base = CONFIG_SYS_SDRAM_BASE;
-#endif
- if (gd->pci_ram_top && gd->pci_ram_top < base + size)
- size = gd->pci_ram_top - base;
- if (size)
- pci_set_region(hose->regions + hose->region_count++, base,
- base, size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
-#endif
return;
}
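Note on the allocation above: hose->regions is now sized from the device tree, one slot per ranges record plus one per possible DRAM bank, instead of the old fixed MAX_PCI_REGIONS array. For example, three ranges records with CONFIG_NR_DRAM_BANKS=4 give max_regions = 3 + 4 = 7. A minimal sketch of the same sizing, with an allocation-failure check added purely for illustration (the patch itself does not check the calloc() return):

    /* Sketch only: size the region table as decode_regions() does above */
    int cells_per_record = pci_addr_cells + addr_cells + size_cells;
    int max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;

    hose->regions = calloc(max_regions, sizeof(struct pci_region));
    if (!hose->regions) {
            pr_err("%s: out of memory\n", __func__);
            return;
    }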
@@ -996,8 +1026,11 @@ static int pci_uclass_pre_probe(struct udevice *bus)
hose->bus = bus;
hose->first_busno = bus->seq;
hose->last_busno = bus->seq;
- hose->skip_auto_config_until_reloc =
- dev_read_bool(bus, "u-boot,skip-auto-config-until-reloc");
+ if (dev_of_valid(bus)) {
+ hose->skip_auto_config_until_reloc =
+ dev_read_bool(bus,
+ "u-boot,skip-auto-config-until-reloc");
+ }
return 0;
}
@@ -1406,14 +1439,55 @@ pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
return bus_addr;
}
+static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
+ struct pci_child_platdata *pdata)
+{
+ phys_addr_t addr = 0;
+
+ /*
+ * In the case of a Virtual Function device, the PF BAR base and
+ * size are reused, so add the per-VF offset for VFn (n = 1, 2, 3, ...)
+ */
+ if (pdata->is_virtfn) {
+ size_t sz;
+ u32 ea_entry;
+
+ /* MaxOffset, 1st DW */
+ dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
+ sz = ea_entry & PCI_EA_FIELD_MASK;
+ /* Fill up lower 2 bits */
+ sz |= (~PCI_EA_FIELD_MASK);
+
+ if (ea_entry & PCI_EA_IS_64) {
+ /* MaxOffset 2nd DW */
+ dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
+ sz |= ((u64)ea_entry) << 32;
+ }
+
+ addr = (pdata->virtid - 1) * (sz + 1);
+ }
+
+ return addr;
+}
+
static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
- int ea_off)
+ int ea_off, struct pci_child_platdata *pdata)
{
int ea_cnt, i, entry_size;
int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
u32 ea_entry;
phys_addr_t addr;
+ if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
+ /*
+ * In the case of a Virtual Function device, 'dev' here is the
+ * Physical Function, so pdata points to the required
+ * VF-specific data.
+ */
+ if (pdata->is_virtfn)
+ bar_id += PCI_EA_BEI_VF_BAR0;
+ }
+
/* EA capability structure header */
dm_pci_read_config32(dev, ea_off, &ea_entry);
ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
@@ -1436,8 +1510,11 @@ static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
addr |= ((u64)ea_entry) << 32;
}
+ if (IS_ENABLED(CONFIG_PCI_SRIOV))
+ addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
+
/* size ignored for now */
- return map_physmem(addr, flags, 0);
+ return map_physmem(addr, 0, flags);
}
return 0;
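Note on dm_pci_map_ea_virt() above: the MaxOffset field holds (BAR size - 1) with the low two flag bits masked off, so OR-ing those bits back in reconstructs size - 1, and VF number virtid then sits (virtid - 1) * size bytes past the physical function's base. A worked example with assumed register contents:

    /* Assumed values, not read from hardware: the MaxOffset dword is
     * 0x000ffffc and the PCI_EA_IS_64 flag is clear. */
    u32 ea_entry = 0x000ffffc;
    size_t sz = ea_entry & PCI_EA_FIELD_MASK;   /* 0x000ffffc */
    sz |= ~PCI_EA_FIELD_MASK;                   /* 0x000fffff, 1 MiB - 1 */

    /* The VF with virtid == 3 lands two full BAR sizes past the PF base */
    phys_addr_t vf_offset = (3 - 1) * (sz + 1); /* 0x200000 */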
@@ -1445,29 +1522,42 @@ static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
{
+ struct pci_child_platdata *pdata = dev_get_parent_platdata(dev);
+ struct udevice *udev = dev;
pci_addr_t pci_bus_addr;
u32 bar_response;
int ea_off;
+ if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
+ /*
+ * In the case of Virtual Function devices, use the PF udevice,
+ * since the EA capability is defined in the Physical Function
+ */
+ if (pdata->is_virtfn)
+ udev = pdata->pfdev;
+ }
+
/*
* if the function supports Enhanced Allocation use that instead of
* BARs
+ * In the case of virtual functions, pdata is used to read the
+ * VF BEI and the EA entry size.
*/
- ea_off = dm_pci_find_capability(dev, PCI_CAP_ID_EA);
+ ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
if (ea_off)
- return dm_pci_map_ea_bar(dev, bar, flags, ea_off);
+ return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata);
/* read BAR address */
- dm_pci_read_config32(dev, bar, &bar_response);
+ dm_pci_read_config32(udev, bar, &bar_response);
pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
/*
* Pass "0" as the length argument to pci_bus_to_virt. The arg
- * isn't actualy used on any platform because u-boot assumes a static
+ * isn't actually used on any platform because U-Boot assumes a static
* linear mapping. In the future, this could read the BAR size
* and pass that as the size if needed.
*/
- return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
+ return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE);
}
static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
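Callers of dm_pci_map_bar() are unchanged by the rework above: EA-described BARs and, with CONFIG_PCI_SRIOV, virtual functions behind a PF are handled transparently. A typical call from a driver probe might look like the following sketch (BAR choice and flag are illustrative):

    /* Map BAR0 of 'dev' whether it comes from a conventional BAR, an EA
     * entry, or a VF whose EA capability lives on its parent PF. */
    void *regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
    if (!regs)
            return -EINVAL;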
@@ -1583,6 +1673,120 @@ int dm_pci_flr(struct udevice *dev)
return 0;
}
+#if defined(CONFIG_PCI_SRIOV)
+int pci_sriov_init(struct udevice *pdev, int vf_en)
+{
+ u16 vendor, device;
+ struct udevice *bus;
+ struct udevice *dev;
+ pci_dev_t bdf;
+ u16 ctrl;
+ u16 num_vfs;
+ u16 total_vf;
+ u16 vf_offset;
+ u16 vf_stride;
+ int vf, ret;
+ int pos;
+
+ pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ debug("Error: SRIOV capability not found\n");
+ return -ENOENT;
+ }
+
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
+ if (vf_en > total_vf)
+ vf_en = total_vf;
+ dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);
+
+ ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);
+
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
+ if (num_vfs > vf_en)
+ num_vfs = vf_en;
+
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);
+
+ dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);
+
+ bdf = dm_pci_get_bdf(pdev);
+
+ pci_get_bus(PCI_BUS(bdf), &bus);
+
+ if (!bus)
+ return -ENODEV;
+
+ bdf += PCI_BDF(0, 0, vf_offset);
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ struct pci_child_platdata *pplat;
+ ulong class;
+
+ pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
+ &class, PCI_SIZE_16);
+
+ debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
+ bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
+
+ /* Find this device in the device tree */
+ ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
+
+ if (ret == -ENODEV) {
+ struct pci_device_id find_id;
+
+ memset(&find_id, '\0', sizeof(find_id));
+ find_id.vendor = vendor;
+ find_id.device = device;
+ find_id.class = class;
+
+ ret = pci_find_and_bind_driver(bus, &find_id,
+ bdf, &dev);
+
+ if (ret)
+ return ret;
+ }
+
+ /* Update the platform data */
+ pplat = dev_get_parent_platdata(dev);
+ pplat->devfn = PCI_MASK_BUS(bdf);
+ pplat->vendor = vendor;
+ pplat->device = device;
+ pplat->class = class;
+ pplat->is_virtfn = true;
+ pplat->pfdev = pdev;
+ pplat->virtid = vf * vf_stride + vf_offset;
+
+ debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
+ __func__, dev->seq, dev->name, PCI_DEV(bdf),
+ PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
+ bdf += PCI_BDF(0, 0, vf_stride);
+ }
+
+ return 0;
+}
+
+int pci_sriov_get_totalvfs(struct udevice *pdev)
+{
+ u16 total_vf;
+ int pos;
+
+ pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ debug("Error: SRIOV capability not found\n");
+ return -ENOENT;
+ }
+
+ dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
+
+ return total_vf;
+}
+#endif /* SRIOV */
+
UCLASS_DRIVER(pci) = {
.id = UCLASS_PCI,
.name = "pci",
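Taken together, the new pci_sriov_get_totalvfs() and pci_sriov_init() exports let a physical-function driver bring up its virtual functions after probe. A hedged sketch of such a caller (everything except the two pci_sriov_* calls is illustrative):

    /* Sketch of a PF driver enabling all of its VFs; 'pdev' is the PF's
     * struct udevice. */
    static int example_pf_enable_vfs(struct udevice *pdev)
    {
            int total_vfs, ret;

            total_vfs = pci_sriov_get_totalvfs(pdev);
            if (total_vfs <= 0)
                    return total_vfs;   /* -ENOENT without an SR-IOV cap */

            /* Binds one child udevice per VF, tagged is_virtfn with pfdev */
            ret = pci_sriov_init(pdev, total_vfs);
            if (ret)
                    return ret;

            return 0;
    }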