Skip to content

Commit a91eb52

Browse files
Yuval Mintz authored and davem330 committed
qed: Revisit chain implementation
RoCE driver is going to need a 32-bit chain [current chain implementation for qed* currently supports only 16-bit producer/consumer chains]. This patch adds said support, as well as doing other slight tweaks and modifications to qed's chain API. Signed-off-by: Yuval Mintz <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 330348d commit a91eb52

File tree

7 files changed

+623
-306
lines changed

7 files changed

+623
-306
lines changed

drivers/net/ethernet/qlogic/qed/qed_dev.c

Lines changed: 254 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <linux/pci.h>
1818
#include <linux/slab.h>
1919
#include <linux/string.h>
20+
#include <linux/vmalloc.h>
2021
#include <linux/etherdevice.h>
2122
#include <linux/qed/qed_chain.h>
2223
#include <linux/qed/qed_if.h>
@@ -1779,92 +1780,285 @@ void qed_hw_remove(struct qed_dev *cdev)
17791780
qed_iov_free_hw_info(cdev);
17801781
}
17811782

1782-
int qed_chain_alloc(struct qed_dev *cdev,
1783-
enum qed_chain_use_mode intended_use,
1784-
enum qed_chain_mode mode,
1785-
u16 num_elems,
1786-
size_t elem_size,
1787-
struct qed_chain *p_chain)
1783+
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
1784+
struct qed_chain *p_chain)
1785+
{
1786+
void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
1787+
dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
1788+
struct qed_chain_next *p_next;
1789+
u32 size, i;
1790+
1791+
if (!p_virt)
1792+
return;
1793+
1794+
size = p_chain->elem_size * p_chain->usable_per_page;
1795+
1796+
for (i = 0; i < p_chain->page_cnt; i++) {
1797+
if (!p_virt)
1798+
break;
1799+
1800+
p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
1801+
p_virt_next = p_next->next_virt;
1802+
p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
1803+
1804+
dma_free_coherent(&cdev->pdev->dev,
1805+
QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
1806+
1807+
p_virt = p_virt_next;
1808+
p_phys = p_phys_next;
1809+
}
1810+
}
1811+
1812+
static void qed_chain_free_single(struct qed_dev *cdev,
1813+
struct qed_chain *p_chain)
1814+
{
1815+
if (!p_chain->p_virt_addr)
1816+
return;
1817+
1818+
dma_free_coherent(&cdev->pdev->dev,
1819+
QED_CHAIN_PAGE_SIZE,
1820+
p_chain->p_virt_addr, p_chain->p_phys_addr);
1821+
}
1822+
1823+
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
1824+
{
1825+
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
1826+
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
1827+
u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
1828+
1829+
if (!pp_virt_addr_tbl)
1830+
return;
1831+
1832+
if (!p_chain->pbl.p_virt_table)
1833+
goto out;
1834+
1835+
for (i = 0; i < page_cnt; i++) {
1836+
if (!pp_virt_addr_tbl[i])
1837+
break;
1838+
1839+
dma_free_coherent(&cdev->pdev->dev,
1840+
QED_CHAIN_PAGE_SIZE,
1841+
pp_virt_addr_tbl[i],
1842+
*(dma_addr_t *)p_pbl_virt);
1843+
1844+
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
1845+
}
1846+
1847+
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1848+
dma_free_coherent(&cdev->pdev->dev,
1849+
pbl_size,
1850+
p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
1851+
out:
1852+
vfree(p_chain->pbl.pp_virt_addr_tbl);
1853+
}
1854+
1855+
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
1856+
{
1857+
switch (p_chain->mode) {
1858+
case QED_CHAIN_MODE_NEXT_PTR:
1859+
qed_chain_free_next_ptr(cdev, p_chain);
1860+
break;
1861+
case QED_CHAIN_MODE_SINGLE:
1862+
qed_chain_free_single(cdev, p_chain);
1863+
break;
1864+
case QED_CHAIN_MODE_PBL:
1865+
qed_chain_free_pbl(cdev, p_chain);
1866+
break;
1867+
}
1868+
}
1869+
1870+
static int
1871+
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
1872+
enum qed_chain_cnt_type cnt_type,
1873+
size_t elem_size, u32 page_cnt)
1874+
{
1875+
u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
1876+
1877+
/* The actual chain size can be larger than the maximal possible value
1878+
* after rounding up the requested elements number to pages, and after
1879+
* taking into account the unusable elements (next-ptr elements).
1880+
* The size of a "u16" chain can be (U16_MAX + 1) since the chain
1881+
* size/capacity fields are of a u32 type.
1882+
*/
1883+
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
1884+
chain_size > 0x10000) ||
1885+
(cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
1886+
chain_size > 0x100000000ULL)) {
1887+
DP_NOTICE(cdev,
1888+
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
1889+
chain_size);
1890+
return -EINVAL;
1891+
}
1892+
1893+
return 0;
1894+
}
1895+
1896+
static int
1897+
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
17881898
{
1789-
dma_addr_t p_pbl_phys = 0;
1790-
void *p_pbl_virt = NULL;
1899+
void *p_virt = NULL, *p_virt_prev = NULL;
17911900
dma_addr_t p_phys = 0;
1792-
void *p_virt = NULL;
1793-
u16 page_cnt = 0;
1794-
size_t size;
1901+
u32 i;
17951902

1796-
if (mode == QED_CHAIN_MODE_SINGLE)
1797-
page_cnt = 1;
1798-
else
1799-
page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1903+
for (i = 0; i < p_chain->page_cnt; i++) {
1904+
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1905+
QED_CHAIN_PAGE_SIZE,
1906+
&p_phys, GFP_KERNEL);
1907+
if (!p_virt) {
1908+
DP_NOTICE(cdev, "Failed to allocate chain memory\n");
1909+
return -ENOMEM;
1910+
}
1911+
1912+
if (i == 0) {
1913+
qed_chain_init_mem(p_chain, p_virt, p_phys);
1914+
qed_chain_reset(p_chain);
1915+
} else {
1916+
qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
1917+
p_virt, p_phys);
1918+
}
1919+
1920+
p_virt_prev = p_virt;
1921+
}
1922+
/* Last page's next element should point to the beginning of the
1923+
* chain.
1924+
*/
1925+
qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
1926+
p_chain->p_virt_addr,
1927+
p_chain->p_phys_addr);
1928+
1929+
return 0;
1930+
}
1931+
1932+
static int
1933+
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
1934+
{
1935+
dma_addr_t p_phys = 0;
1936+
void *p_virt = NULL;
18001937

1801-
size = page_cnt * QED_CHAIN_PAGE_SIZE;
18021938
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1803-
size, &p_phys, GFP_KERNEL);
1939+
QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
18041940
if (!p_virt) {
1805-
DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1806-
goto nomem;
1941+
DP_NOTICE(cdev, "Failed to allocate chain memory\n");
1942+
return -ENOMEM;
18071943
}
18081944

1809-
if (mode == QED_CHAIN_MODE_PBL) {
1810-
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1811-
p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1812-
size, &p_pbl_phys,
1813-
GFP_KERNEL);
1814-
if (!p_pbl_virt) {
1815-
DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1816-
goto nomem;
1817-
}
1945+
qed_chain_init_mem(p_chain, p_virt, p_phys);
1946+
qed_chain_reset(p_chain);
18181947

1819-
qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1820-
(u8)elem_size, intended_use,
1821-
p_pbl_phys, p_pbl_virt);
1822-
} else {
1823-
qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1824-
(u8)elem_size, intended_use, mode);
1948+
return 0;
1949+
}
1950+
1951+
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
1952+
{
1953+
u32 page_cnt = p_chain->page_cnt, size, i;
1954+
dma_addr_t p_phys = 0, p_pbl_phys = 0;
1955+
void **pp_virt_addr_tbl = NULL;
1956+
u8 *p_pbl_virt = NULL;
1957+
void *p_virt = NULL;
1958+
1959+
size = page_cnt * sizeof(*pp_virt_addr_tbl);
1960+
pp_virt_addr_tbl = vmalloc(size);
1961+
if (!pp_virt_addr_tbl) {
1962+
DP_NOTICE(cdev,
1963+
"Failed to allocate memory for the chain virtual addresses table\n");
1964+
return -ENOMEM;
18251965
}
1966+
memset(pp_virt_addr_tbl, 0, size);
18261967

1827-
return 0;
1968+
/* The allocation of the PBL table is done with its full size, since it
1969+
* is expected to be successive.
1970+
* qed_chain_init_pbl_mem() is called even in a case of an allocation
1971+
* failure, since pp_virt_addr_tbl was previously allocated, and it
1972+
* should be saved to allow its freeing during the error flow.
1973+
*/
1974+
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1975+
p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1976+
size, &p_pbl_phys, GFP_KERNEL);
1977+
qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
1978+
pp_virt_addr_tbl);
1979+
if (!p_pbl_virt) {
1980+
DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
1981+
return -ENOMEM;
1982+
}
18281983

1829-
nomem:
1830-
dma_free_coherent(&cdev->pdev->dev,
1831-
page_cnt * QED_CHAIN_PAGE_SIZE,
1832-
p_virt, p_phys);
1833-
dma_free_coherent(&cdev->pdev->dev,
1834-
page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1835-
p_pbl_virt, p_pbl_phys);
1984+
for (i = 0; i < page_cnt; i++) {
1985+
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1986+
QED_CHAIN_PAGE_SIZE,
1987+
&p_phys, GFP_KERNEL);
1988+
if (!p_virt) {
1989+
DP_NOTICE(cdev, "Failed to allocate chain memory\n");
1990+
return -ENOMEM;
1991+
}
18361992

1837-
return -ENOMEM;
1993+
if (i == 0) {
1994+
qed_chain_init_mem(p_chain, p_virt, p_phys);
1995+
qed_chain_reset(p_chain);
1996+
}
1997+
1998+
/* Fill the PBL table with the physical address of the page */
1999+
*(dma_addr_t *)p_pbl_virt = p_phys;
2000+
/* Keep the virtual address of the page */
2001+
p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
2002+
2003+
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
2004+
}
2005+
2006+
return 0;
18382007
}
18392008

1840-
void qed_chain_free(struct qed_dev *cdev,
1841-
struct qed_chain *p_chain)
2009+
int qed_chain_alloc(struct qed_dev *cdev,
2010+
enum qed_chain_use_mode intended_use,
2011+
enum qed_chain_mode mode,
2012+
enum qed_chain_cnt_type cnt_type,
2013+
u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
18422014
{
1843-
size_t size;
2015+
u32 page_cnt;
2016+
int rc = 0;
18442017

1845-
if (!p_chain->p_virt_addr)
1846-
return;
2018+
if (mode == QED_CHAIN_MODE_SINGLE)
2019+
page_cnt = 1;
2020+
else
2021+
page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
18472022

1848-
if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1849-
size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1850-
dma_free_coherent(&cdev->pdev->dev, size,
1851-
p_chain->pbl.p_virt_table,
1852-
p_chain->pbl.p_phys_table);
2023+
rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
2024+
if (rc) {
2025+
DP_NOTICE(cdev,
2026+
"Cannot allocate a chain with the given arguments:\n"
2027+
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
2028+
intended_use, mode, cnt_type, num_elems, elem_size);
2029+
return rc;
18532030
}
18542031

1855-
size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1856-
dma_free_coherent(&cdev->pdev->dev, size,
1857-
p_chain->p_virt_addr,
1858-
p_chain->p_phys_addr);
2032+
qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
2033+
mode, cnt_type);
2034+
2035+
switch (mode) {
2036+
case QED_CHAIN_MODE_NEXT_PTR:
2037+
rc = qed_chain_alloc_next_ptr(cdev, p_chain);
2038+
break;
2039+
case QED_CHAIN_MODE_SINGLE:
2040+
rc = qed_chain_alloc_single(cdev, p_chain);
2041+
break;
2042+
case QED_CHAIN_MODE_PBL:
2043+
rc = qed_chain_alloc_pbl(cdev, p_chain);
2044+
break;
2045+
}
2046+
if (rc)
2047+
goto nomem;
2048+
2049+
return 0;
2050+
2051+
nomem:
2052+
qed_chain_free(cdev, p_chain);
2053+
return rc;
18592054
}
18602055

1861-
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1862-
u16 src_id, u16 *dst_id)
2056+
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
18632057
{
18642058
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
18652059
u16 min, max;
18662060

1867-
min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
2061+
min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
18682062
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
18692063
DP_NOTICE(p_hwfn,
18702064
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",

drivers/net/ethernet/qlogic/qed/qed_dev_api.h

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -245,18 +245,16 @@ int
245245
qed_chain_alloc(struct qed_dev *cdev,
246246
enum qed_chain_use_mode intended_use,
247247
enum qed_chain_mode mode,
248-
u16 num_elems,
249-
size_t elem_size,
250-
struct qed_chain *p_chain);
248+
enum qed_chain_cnt_type cnt_type,
249+
u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
251250

252251
/**
253252
* @brief qed_chain_free - Free chain DMA memory
254253
*
255254
* @param p_hwfn
256255
* @param p_chain
257256
*/
258-
void qed_chain_free(struct qed_dev *cdev,
259-
struct qed_chain *p_chain);
257+
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
260258

261259
/**
262260
* @@brief qed_fw_l2_queue - Get absolute L2 queue ID

drivers/net/ethernet/qlogic/qed/qed_sp_commands.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
308308
struct qed_spq_entry *p_ent = NULL;
309309
struct qed_sp_init_data init_data;
310310
int rc = -EINVAL;
311+
u8 page_cnt;
311312

312313
/* update initial eq producer */
313314
qed_eq_prod_update(p_hwfn,
@@ -350,8 +351,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
350351
/* Place EQ address in RAMROD */
351352
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
352353
p_hwfn->p_eq->chain.pbl.p_phys_table);
353-
p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
354-
354+
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
355+
p_ramrod->event_ring_num_pages = page_cnt;
355356
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
356357
p_hwfn->p_consq->chain.pbl.p_phys_table);
357358

0 commit comments

Comments (0)