/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include <linux/circ_buf.h>
#include <linux/types.h>
#include <linux/io.h>

#include "scif_rb.h"

#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
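
/*
 * Note on the circ_buf helpers (background, not new logic): with a
 * power-of-two size, CIRC_CNT(head, tail, size) evaluates to
 * ((head - tail) & (size - 1)) and CIRC_SPACE(head, tail, size) to
 * CIRC_CNT(tail, head + 1, size), so one byte of the ring is always
 * left unused to distinguish a full ring from an empty one.
 */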

/**
 * scif_rb_init - Initializes the ring buffer
 * @rb: ring buffer
 * @read_ptr: A pointer to the read offset
 * @write_ptr: A pointer to the write offset
 * @rb_base: A pointer to the base of the ring buffer
 * @size: log2 of the size of the ring buffer in bytes
 */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
		  void *rb_base, u8 size)
{
	rb->rb_base = rb_base;
	rb->size = (1 << size);
	rb->read_ptr = read_ptr;
	rb->write_ptr = write_ptr;
	rb->current_read_offset = *read_ptr;
	rb->current_write_offset = *write_ptr;
}
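
/*
 * Usage sketch (illustrative only; the variable names and the 4 KiB
 * allocation below are hypothetical, not part of this file):
 *
 *	static u32 read_off, write_off;
 *	static struct scif_rb rb;
 *	void *base = kzalloc(4096, GFP_KERNEL);
 *
 *	if (base)
 *		scif_rb_init(&rb, &read_off, &write_off, base, 12);
 *
 * Passing 12 gives a 1 << 12 == 4096 byte ring. In real use the two
 * offsets would live in memory visible to the peer, since the peer's
 * commit/update calls write to them across the bus.
 */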

/* Copies a message to the ring buffer -- handles the wrap around case */
static void memcpy_torb(struct scif_rb *rb, void *header,
			void *msg, u32 size)
{
	u32 size1, size2;

	if (header + size >= rb->rb_base + rb->size) {
		/* Need to call two copies if it wraps around */
		size1 = (u32)(rb->rb_base + rb->size - header);
		size2 = size - size1;
		memcpy_toio((void __iomem __force *)header, msg, size1);
		memcpy_toio((void __iomem __force *)rb->rb_base,
			    msg + size1, size2);
	} else {
		memcpy_toio((void __iomem __force *)header, msg, size);
	}
}
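
/*
 * Worked example of the wrap-around split above (illustrative numbers):
 * with a 1024 byte ring, header at offset 1020 and size == 8, the test
 * 1020 + 8 >= 1024 holds, so size1 = 1024 - 1020 = 4 bytes go to the
 * end of the ring and size2 = 8 - 4 = 4 bytes continue at rb_base.
 * Because the comparison is ">=", a copy that ends exactly at the ring
 * boundary is also split, with a harmless zero-length second copy.
 */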

/* Copies a message from the ring buffer -- handles the wrap around case */
static void memcpy_fromrb(struct scif_rb *rb, void *header,
			  void *msg, u32 size)
{
	u32 size1, size2;

	if (header + size >= rb->rb_base + rb->size) {
		/* Need to call two copies if it wraps around */
		size1 = (u32)(rb->rb_base + rb->size - header);
		size2 = size - size1;
		memcpy_fromio(msg, (void __iomem __force *)header, size1);
		memcpy_fromio(msg + size1,
			      (void __iomem __force *)rb->rb_base, size2);
	} else {
		memcpy_fromio(msg, (void __iomem __force *)header, size);
	}
}

/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Return: size available for writing to RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
	rb->current_read_offset = *rb->read_ptr;
	/*
	 * Update from the HW read pointer only once the peer has exposed the
	 * new empty slot. This barrier is paired with the memory barrier in
	 * scif_rb_update_read_ptr()
	 */
	mb();
	return scif_rb_ring_space(rb->current_write_offset,
				  rb->current_read_offset, rb->size);
}
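
/*
 * Example of the space calculation (illustrative numbers): with a
 * 1024 byte ring and equal read/write offsets the ring is empty, yet
 * CIRC_SPACE() reports only 1023 writable bytes, because filling the
 * last byte would make a full ring indistinguishable from an empty one.
 */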

/**
 * scif_rb_write - Write a message to the RB
 * @rb: ring buffer
 * @msg: buffer containing the message. Must be at least size bytes long
 * @size: the size (in bytes) to be copied to the RB
 *
 * This API does not block; if there isn't enough space in the RB it
 * fails rather than waiting for the peer to drain the ring.
 * Return: 0 on success or -ENOMEM on failure
 */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
{
	void *header;

	if (scif_rb_space(rb) < size)
		return -ENOMEM;
	header = rb->rb_base + rb->current_write_offset;
	memcpy_torb(rb, header, msg, size);
	/*
	 * Update only the local write offset here; the shared write
	 * pointer is not updated until scif_rb_commit(), so the peer
	 * cannot see the message yet.
	 */
	rb->current_write_offset =
		(rb->current_write_offset + size) & (rb->size - 1);
	return 0;
}
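
/*
 * Producer-side sketch (illustrative; "msg" is a hypothetical
 * fixed-size message, not a type defined in this file):
 *
 *	if (!scif_rb_write(&rb, &msg, sizeof(msg)))
 *		scif_rb_commit(&rb);
 *
 * Several messages can be written back to back and then made visible
 * to the peer with a single scif_rb_commit() call.
 */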

/**
 * scif_rb_commit - Make the written messages visible to the peer
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
	/*
	 * We must ensure ordering of all the data committed previously
	 * before we expose the new message to the peer by updating the
	 * write_ptr. This write barrier is paired with the read barrier
	 * in scif_rb_count(..)
	 */
	wmb();
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
#endif
}

/**
 * scif_rb_get - Get the next message from the ring buffer
 * @rb: ring buffer
 * @size: Number of bytes to be read
 *
 * Return: NULL if there are fewer than size bytes available to read,
 * otherwise a pointer to the start of the message
 */
static void *scif_rb_get(struct scif_rb *rb, u32 size)
{
	void *header = NULL;

	if (scif_rb_count(rb, size) >= size)
		header = rb->rb_base + rb->current_read_offset;
	return header;
}

/**
 * scif_rb_get_next - Read from ring buffer.
 * @rb: ring buffer
 * @msg: buffer to hold the message. Must be at least size bytes long
 * @size: Number of bytes to be read
 *
 * Return: number of bytes read if available bytes are >= size, otherwise
 * returns zero.
 */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
{
	void *header;
	int read_size = 0;

	header = scif_rb_get(rb, size);
	if (header) {
		u32 next_cmd_offset =
			(rb->current_read_offset + size) & (rb->size - 1);

		read_size = size;
		rb->current_read_offset = next_cmd_offset;
		memcpy_fromrb(rb, header, msg, size);
	}
	return read_size;
}
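
/*
 * Consumer-side sketch (illustrative; "msg" is again a hypothetical
 * fixed-size message and process_msg() a placeholder for whatever
 * handling the caller does before releasing the slot):
 *
 *	if (scif_rb_get_next(&rb, &msg, sizeof(msg)) == sizeof(msg)) {
 *		process_msg(&msg);
 *		scif_rb_update_read_ptr(&rb);
 *	}
 */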

/**
 * scif_rb_update_read_ptr - Publish the current read offset to the peer
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
	u32 new_offset;

	new_offset = rb->current_read_offset;
	/*
	 * We must ensure ordering of all the data committed or read
	 * previously before we expose the empty slot to the peer by
	 * updating the read_ptr. This barrier is paired with the memory
	 * barrier in scif_rb_space(..)
	 */
	mb();
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#ifdef CONFIG_INTEL_MIC_CARD
	/*
	 * X100 Si Bug: For the case where a Core is performing an EXT_WR
	 * followed by a Doorbell Write, the Core must perform two EXT_WR to the
	 * same address with the same data before it does the Doorbell Write.
	 * This way, if ordering is violated for the Interrupt Message, it will
	 * fall just behind the first Posted associated with the first EXT_WR.
	 */
	ACCESS_ONCE(*rb->read_ptr) = new_offset;
#endif
}
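
/*
 * Design note (reasoning from the code above, not from separate
 * documentation): updating the read pointer is deliberately split off
 * from scif_rb_get_next() so that a caller can consume a message, or a
 * batch of messages, completely before exposing the space to the peer;
 * until scif_rb_update_read_ptr() runs, the producer cannot reuse the
 * slots that are still being processed.
 */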

/**
 * scif_rb_count - Query the number of bytes that can be read from the RB
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
	if (scif_rb_ring_cnt(rb->current_write_offset,
			     rb->current_read_offset,
			     rb->size) < size) {
		rb->current_write_offset = *rb->write_ptr;
		/*
		 * Re-read from the HW write pointer only when the cached
		 * count is insufficient, i.e. once the peer has exposed a
		 * new message. This read barrier is paired with the write
		 * barrier in scif_rb_commit(..)
		 */
		smp_rmb();
	}
	return scif_rb_ring_cnt(rb->current_write_offset,
				rb->current_read_offset,
				rb->size);
}
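
/*
 * Worked example of the count (illustrative numbers): with a 1024 byte
 * ring, write offset 8 and read offset 1020, CIRC_CNT() computes
 * (8 - 1020) & 1023 == 12, correctly counting the 4 bytes up to the
 * wrap plus the 8 bytes at the start of the ring.
 */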