drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Return: 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}
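
/*
 * Sizing sketch (illustrative only): with the driver's default mailbox
 * length of IDPF_DFLT_MBX_Q_LEN (64) entries and 32-byte descriptors,
 * the call above makes a single 2 KB DMA-coherent allocation. Those
 * constants are assumptions drawn from elsewhere in the driver; only
 * the arithmetic is guaranteed by this function:
 *
 *	size = cq->ring_size * sizeof(struct idpf_ctlq_desc)
 *	     = 64 * 32 = 2048 bytes
 */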

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * For receive queues, allocate the buffer pointer array and the DMA
 * buffers it tracks. Transmit queues carry caller-owned messages, so no
 * DMA buffers are allocated for them here.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
				 GFP_KERNEL);
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* Allocate the mapped buffers, except for the last entry: one
	 * slot is conventionally left unposted so a full ring can be
	 * told apart from an empty one.
	 */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;

		cq->bi.rx_buff[i] = kzalloc(sizeof(struct idpf_dma_mem),
					    GFP_KERNEL);
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			kfree(cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	while (i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		kfree(cq->bi.rx_buff[i]);
	}
	kfree(cq->bi.rx_buff);

	return -ENOMEM;
}
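
/*
 * Resulting layout sketch, assuming the va/pa/size fields that
 * struct idpf_dma_mem carries elsewhere in this driver:
 *
 *	cq->bi.rx_buff		-> array of ring_size pointers
 *	cq->bi.rx_buff[i]	-> one idpf_dma_mem per posted buffer
 *
 * A consumer could then reach buffer i like so (illustrative only):
 *
 *	struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
 *	void *va = bi->va;	// CPU virtual address of the buffer
 *	dma_addr_t pa = bi->pa;	// bus address written to the descriptor
 */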

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and the buffer pointer array for both
 * RX and TX queues. The upper layers are expected to manage freeing of TX
 * DMA buffers.
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				kfree(cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	kfree(bi);
}
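
/*
 * Ownership sketch, as implemented above: for RX queues this function
 * frees every level of the allocation made in idpf_ctlq_alloc_bufs():
 *
 *	cq->bi.rx_buff		-> pointer array	(freed here)
 *	cq->bi.rx_buff[i]	-> idpf_dma_mem struct	(freed here)
 *	cq->bi.rx_buff[i]->va	-> DMA buffer		(freed here)
 *
 * For TX queues only the cq->bi.tx_msg pointer array is freed; the
 * messages it referenced remain owned by the upper layers.
 */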

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold cq_lock when calling this, as the memory allocation routines
 * used here may sleep and are not safe to call from atomic context.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);

	return err;
}
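
/*
 * Usage sketch, assuming the caller has already populated cq->ring_size,
 * cq->buf_size and cq->cq_type (error handling abbreviated):
 *
 *	err = idpf_ctlq_alloc_ring_res(hw, cq);
 *	if (err)
 *		return err;
 *
 *	... register the queue, post RX buffers, exchange messages ...
 *
 *	idpf_ctlq_dealloc_ring_res(hw, cq);
 */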