/*-
 * Copyright (c) 2013 Alexander Fedorov <alexander.fedorov@rtlservice.com>
 * All rights reserved.
 *
 * Copyright (c) 2013-2014:
 * Computer Architecture Laboratory, National University of Cordoba. Cordoba, Argentina.
 * Nicolas Vidable <njvidable@gmail.com>
 * Martin Galvan <omgalvan.86@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * MMC/SD Host Controller driver for the Allwinner A10 SoC.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/watchdog.h>

#include <sys/kdb.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/intr.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/a10_clk.h>
#include <arm/allwinner/a10_mmc.h>

struct a10_mmc_softc {
        device_t device;
        struct mtx mutex;
        struct mmc_host mmc_host;
        struct mmc_request *mmc_request;
        struct resource *memory_resource;
        struct resource *interrupt_resource;
        void *interrupt_handler;
        uint32_t read_interrupts;       /* Interrupts we've read so far. */
        uint32_t mod_clock;
        int bus_busy;
};

static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
static void a10_mmc_release_resources(struct a10_mmc_softc *);
static int a10_mmc_reset_controller(struct a10_mmc_softc *);
static void a10_mmc_clear_interrupts(struct a10_mmc_softc *);
static void a10_mmc_enable_interrupts(struct a10_mmc_softc *);
static void a10_mmc_interrupt_handler(void *);
static void a10_mmc_request_ok(struct a10_mmc_softc *);
static void a10_mmc_request_error(struct a10_mmc_softc *);
static void a10_mmc_finalize_request(struct a10_mmc_softc *);
static void a10_mmc_send_manual_stop(struct a10_mmc_softc *, struct mmc_request *);
static void a10_mmc_set_bus_width(struct a10_mmc_softc *, struct mmc_ios *);
static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_update_clock(struct a10_mmc_softc *);
static int a10_mmc_set_clock_enabled(struct a10_mmc_softc *, int);
static int a10_mmc_set_clock_rate(struct a10_mmc_softc *, struct mmc_ios *);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);
static int a10_mmc_do_programmed_io(struct a10_mmc_softc *, struct mmc_data *);
static int a10_mmc_timeout_without_command_done(struct a10_mmc_softc *);
static int a10_mmc_interrupt_error(struct a10_mmc_softc *);
static int a10_mmc_interrupt_done(uint32_t, struct mmc_data *);
static int a10_mmc_fifo_wait(struct a10_mmc_softc *, uint32_t, uint32_t);

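/* Helpers to lock and unlock the softc mutex. */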
static inline void
a10_mmc_lock(struct a10_mmc_softc *sc)
{
        mtx_lock(&sc->mutex);
}

static inline void
a10_mmc_unlock(struct a10_mmc_softc *sc)
{
        mtx_unlock(&sc->mutex);
}

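/* 32-bit accessors for the controller's register window. */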
static inline uint32_t
a10_mmc_read_4(struct a10_mmc_softc *sc, bus_size_t offset)
{
        return (bus_read_4(sc->memory_resource, offset));
}

static inline void
a10_mmc_write_4(struct a10_mmc_softc *sc, bus_size_t offset, uint32_t value)
{
        bus_write_4(sc->memory_resource, offset, value);
}

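/* Match the device tree node against the "allwinner,sun4i-mmc" compatible string. */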
static int
a10_mmc_probe(device_t device)
{
        int result = ENXIO;

        if (ofw_bus_is_compatible(device, "allwinner,sun4i-mmc")) {
                device_set_desc(device, "Allwinner Integrated MMC/SD controller");
                result = BUS_PROBE_DEFAULT;
        }

        return (result);
}

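/* Allocate bus resources, set up the interrupt handler, initialize the
 * controller and attach the mmc bus child.
 */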
static int
a10_mmc_attach(device_t device)
{
        struct a10_mmc_softc *sc;
        device_t child;
        int resource_id;
        int error;

        sc = device_get_softc(device);
        sc->device = device;
        sc->mmc_request = NULL; /* Set the current request to NULL. */

        mtx_init(&sc->mutex, device_get_nameunit(sc->device), "a10_mmc", MTX_DEF);

        /* Allocate a memory window resource. */
        resource_id = 0;
        sc->memory_resource = bus_alloc_resource_any(device, SYS_RES_MEMORY,
            &resource_id, RF_ACTIVE);

        if (sc->memory_resource == NULL) {
                device_printf(device, "Cannot allocate memory window!\n");

                return (ENXIO);
        }

        /* Allocate an interrupt resource. */
        resource_id = 0;
        sc->interrupt_resource = bus_alloc_resource_any(device, SYS_RES_IRQ, &resource_id,
            RF_ACTIVE | RF_SHAREABLE);

        if (sc->interrupt_resource == NULL) {
                device_printf(device, "Cannot allocate interrupt resource!\n");
                a10_mmc_release_resources(sc);

                return (ENXIO);
        }

        /* Set the ithread interrupt handler for our interrupt resource. */
        error = bus_setup_intr(device, sc->interrupt_resource, INTR_TYPE_MISC |
            INTR_MPSAFE, NULL, a10_mmc_interrupt_handler, sc,
            &sc->interrupt_handler);

        if (error) {
                device_printf(device, "Cannot setup interrupt handler!\n");
                a10_mmc_release_resources(sc);

                return (ENXIO);
        }

        /* Activate the MMC clock. */
        error = a10_clk_mmc_activate(&sc->mod_clock);

        if (error) {
                device_printf(device, "Cannot activate clock!\n");
                a10_mmc_release_resources(sc);

                return (ENXIO);
        }

        /* Reset controller. */
        error = a10_mmc_reset_controller(sc);

        if (error) {
                a10_mmc_release_resources(sc);

                return (ENXIO);
        }

        /* Configure timeout register. */
        a10_mmc_write_4(sc, A10_MMC_TIMEOUT_REG, 0xFFFFFFFF);

        /* Clear interrupt flags. */
        a10_mmc_clear_interrupts(sc);

        /* Initialization black magic: there's no documentation on why these
         * values must be written here.
         */
        a10_mmc_write_4(sc, A10_MMC_DEBUG_ENABLE_REG, 0xDEB);
        a10_mmc_write_4(sc, A10_MMC_FUNCTION_SELECT_REG, A10_MMC_CE_ATA_ON);

        /* Set minimum and maximum operating frequencies (400 kHz - 52 MHz). */
        sc->mmc_host.f_min = 400000;
        sc->mmc_host.f_max = 52000000;

        /* Set operation conditions (voltage). */
        sc->mmc_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;

        /* Set additional host controller capabilities. */
        sc->mmc_host.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED;

        /* Set mode. */
        sc->mmc_host.mode = mode_sd;

        device_set_ivars(device, &sc->mmc_host);

        child = device_add_child(device, "mmc", 0);

        if (child == NULL) {
                device_printf(device, "Attaching MMC bus failed!\n");
                a10_mmc_release_resources(sc);

                return (ENXIO);
        }

        bus_generic_attach(device);

        return (0);
}

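/* Detach is not supported yet; release any allocated resources and report the
 * device as busy.
 */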
static int
a10_mmc_detach(device_t device)
{
        a10_mmc_release_resources(device_get_softc(device));

        return (EBUSY);
}

/* Releases allocated resources. */
static void
a10_mmc_release_resources(struct a10_mmc_softc *sc)
{
        if (sc->interrupt_handler != NULL) {
                bus_teardown_intr(sc->device, sc->interrupt_resource, sc->interrupt_handler);
                sc->interrupt_handler = NULL;
        }

        if (sc->memory_resource != NULL) {
                bus_release_resource(sc->device, SYS_RES_MEMORY, 0, sc->memory_resource);
                sc->memory_resource = NULL;
        }

        if (sc->interrupt_resource != NULL) {
                bus_release_resource(sc->device, SYS_RES_IRQ, 0, sc->interrupt_resource);
                sc->interrupt_resource = NULL;
        }
}

/* Writes the hardware reset bits to the global control register. */
static int
a10_mmc_reset_controller(struct a10_mmc_softc *sc)
{
        int error = 0;
        uint32_t time_left = 0xFFFF;
        uint32_t reg_value;

        reg_value = a10_mmc_read_4(sc, A10_MMC_GLOBAL_CONTROL_REG);
        reg_value |= A10_MMC_HARDWARE_RESET_BITS;

        a10_mmc_write_4(sc, A10_MMC_GLOBAL_CONTROL_REG, reg_value);

        /* Wait until the reset is done or a timeout occurs. */
        do {
                reg_value = a10_mmc_read_4(sc, A10_MMC_GLOBAL_CONTROL_REG);
        } while ((reg_value & A10_MMC_HARDWARE_RESET_BITS) && (--time_left));

        if (!time_left) {
                device_printf(sc->device, "Reset timeout!\n");
                error = EIO;
        }

        return (error);
}

/* Clear the interrupt status flags. */
static void
a10_mmc_clear_interrupts(struct a10_mmc_softc *sc)
{
        sc->read_interrupts = 0;
        a10_mmc_write_4(sc, A10_MMC_RAW_INTERRUPT_STATUS_REG, 0xFFFFFFFF);
        a10_mmc_write_4(sc, A10_MMC_INTERRUPT_MASK_REG, 0);
}

/* Enable interrupts. */
static void
a10_mmc_enable_interrupts(struct a10_mmc_softc *sc)
{
        uint32_t reg_value;

        reg_value = a10_mmc_read_4(sc, A10_MMC_GLOBAL_CONTROL_REG);
        reg_value |= A10_MMC_INTERRUPT_ENABLE;

        a10_mmc_write_4(sc, A10_MMC_GLOBAL_CONTROL_REG, reg_value);
}

/* Service an MMC request by sending its associated command to the card and
 * enabling the appropriate interrupts. */
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *request)
{
        struct a10_mmc_softc *sc = device_get_softc(bus);
        struct mmc_command *command = request->cmd;
        uint32_t command_reg_value = A10_MMC_START;
        uint32_t interrupt_mask = A10_MMC_COMMAND_DONE | A10_MMC_INTERRUPT_ERROR_BITS;
        uint32_t block_size;
        int error = 0;

        a10_mmc_lock(sc);

        if (sc->mmc_request) {
                a10_mmc_unlock(sc);
                return (EBUSY);
        }

        sc->mmc_request = request;

        if (command->opcode == MMC_GO_IDLE_STATE) {
                command_reg_value |= A10_MMC_SEND_INIT_SEQUENCE;
        }

        if (command->flags & MMC_RSP_PRESENT) {
                command_reg_value |= A10_MMC_RESPONSE_EXPECTED;
        }

        if (command->flags & MMC_RSP_136) {
                command_reg_value |= A10_MMC_LONG_RESPONSE;
        }

        if (command->flags & MMC_RSP_CRC) {
                command_reg_value |= A10_MMC_CHECK_RESPONSE_CRC;
        }

        if (command->flags & MMC_RSP_BUSY) {
                interrupt_mask |= A10_MMC_DATA_READ_TIMEOUT;
        }

        if (command->data != NULL) {
                command_reg_value |= A10_MMC_DATA_EXPECTED | A10_MMC_WAIT_PREVIOUS_DATA_OVER;

                if (request->stop != NULL) { /* Multiple block transfer. */
                        command_reg_value |= A10_MMC_SEND_AUTO_STOP;
                        interrupt_mask |= A10_MMC_AUTO_COMMAND_DONE;
                } else { /* Single-block transfer. */
                        interrupt_mask |= A10_MMC_DATA_TRANSFER_OVER;
                }

                if (command->data->flags & MMC_DATA_WRITE) {
                        command_reg_value |= A10_MMC_WRITE;
                } else { /* MMC_DATA_READ */
                        command_reg_value &= ~A10_MMC_WRITE;
                }

                block_size = min(command->data->len, 512);

                a10_mmc_write_4(sc, A10_MMC_BLOCK_SIZE_REG, block_size);
                a10_mmc_write_4(sc, A10_MMC_BYTE_COUNT_REG, command->data->len);

                /* Choose access by AHB. */
                a10_mmc_write_4(sc, A10_MMC_GLOBAL_CONTROL_REG,
                    a10_mmc_read_4(sc, A10_MMC_GLOBAL_CONTROL_REG) |
                    A10_MMC_ACCESS_BY_AHB);
        }

        a10_mmc_write_4(sc, A10_MMC_INTERRUPT_MASK_REG, interrupt_mask);
        a10_mmc_enable_interrupts(sc);

        /* Send the command to the card. */
        a10_mmc_write_4(sc, A10_MMC_ARGUMENT_REG, command->arg);
        a10_mmc_write_4(sc, A10_MMC_COMMAND_REG, command_reg_value | command->opcode);

        if (command->data != NULL) {
                error = a10_mmc_do_programmed_io(sc, command->data);
        }

        a10_mmc_unlock(sc);

        return (error);
}

/* Handle an IRQ by checking the interrupt status bits and the corresponding
 * request. */
static void
a10_mmc_interrupt_handler(void *arg)
{
        struct a10_mmc_softc *sc = (struct a10_mmc_softc *)arg;
        uint32_t masked_isr;

        a10_mmc_lock(sc);

        masked_isr = a10_mmc_read_4(sc, A10_MMC_MASKED_INTERRUPT_STATUS_REG);

        if (sc->mmc_request != NULL) {
                sc->read_interrupts |= masked_isr;

                /* After a RESPONSE_TIMEOUT, we must wait for a COMMAND_DONE.
                 * Thus, if we've only read a RESPONSE_TIMEOUT from the ISR so far,
                 * enable COMMAND_DONE interrupts now.
                 */
                if (a10_mmc_timeout_without_command_done(sc)) {
                        a10_mmc_write_4(sc, A10_MMC_INTERRUPT_MASK_REG, A10_MMC_COMMAND_DONE);
                } else if (a10_mmc_interrupt_error(sc)) {
                        device_printf(sc->device, "IRQ error! Interrupts: 0x%08X\n", sc->read_interrupts);
                        a10_mmc_request_error(sc);
                } else if (a10_mmc_interrupt_done(sc->read_interrupts, sc->mmc_request->cmd->data)) {
                        a10_mmc_request_ok(sc);
                }
        } else { /* NULL request */
                device_printf(sc->device, "NULL MMC Request; Masked ISR: 0x%08X\n", masked_isr);
        }

        /* Clear interrupts. */
        a10_mmc_write_4(sc, A10_MMC_RAW_INTERRUPT_STATUS_REG, masked_isr);

        a10_mmc_unlock(sc);
}

/* Sometimes we may get a RESPONSE_TIMEOUT interrupt without the COMMAND_DONE
 * bit set.
 */
static int
a10_mmc_timeout_without_command_done(struct a10_mmc_softc *sc)
{
        return (sc->read_interrupts & A10_MMC_RESPONSE_TIMEOUT &&
            !(sc->read_interrupts & A10_MMC_COMMAND_DONE));
}

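/* Check whether any of the error bits have been read from the ISR so far. */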
static int
a10_mmc_interrupt_error(struct a10_mmc_softc *sc)
{
        return (sc->read_interrupts & A10_MMC_INTERRUPT_ERROR_BITS);
}

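/* A request is done once we see COMMAND_DONE or, for data transfers,
 * DATA_TRANSFER_OVER.
 */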
static int
a10_mmc_interrupt_done(uint32_t read_interrupts, struct mmc_data *command_data)
{
        return ((read_interrupts & A10_MMC_COMMAND_DONE) ||
            ((command_data != NULL) && (read_interrupts & A10_MMC_DATA_TRANSFER_OVER)));
}

/* As we don't have DMA support yet, we have to do PIO for every transfer. */
static int
a10_mmc_do_programmed_io(struct a10_mmc_softc *sc, struct mmc_data *command_data)
{
        int i = 0;
        int error = 0;
        uint32_t *buffer = (uint32_t *)command_data->data;
        uint32_t data_length = command_data->len >> 2;

        if (command_data->flags & MMC_DATA_READ) {
                while (i < data_length && !error) {
                        error = a10_mmc_fifo_wait(sc, 0xFFFFFFFF, A10_MMC_FIFO_EMPTY);

                        if (!error) {
                                buffer[i] = a10_mmc_read_4(sc, A10_MMC_FIFO_ACCESS_ADDRESS);
                                ++i;
                        }
                }
        } else { /* MMC_DATA_WRITE */
                while (i < data_length && !error) {
                        error = a10_mmc_fifo_wait(sc, 0xFFFFFFFF, A10_MMC_FIFO_FULL);

                        if (!error) {
                                a10_mmc_write_4(sc, A10_MMC_FIFO_ACCESS_ADDRESS, buffer[i]);
                                ++i;
                        }
                }
        }

        return (error);
}

/* Wait until the FIFO is ready. */
static int
a10_mmc_fifo_wait(struct a10_mmc_softc *sc, uint32_t time_left, uint32_t status_bits)
{
        int error = 0;
        uint32_t status_reg_value;

        do {
                status_reg_value = a10_mmc_read_4(sc, A10_MMC_STATUS_REG);
        } while ((status_reg_value & status_bits) && (--time_left));

        if (!time_left) {
                device_printf(sc->device, "Data transfer timeout!\n");
                error = EIO;
        }

        return (error);
}

/* We successfully completed a request. */
static void
a10_mmc_request_ok(struct a10_mmc_softc *sc)
{
        struct mmc_command *command = sc->mmc_request->cmd;
        uint32_t response_status;

        do {
                response_status = a10_mmc_read_4(sc, A10_MMC_STATUS_REG);
        } while (response_status & A10_MMC_CARD_DATA_BUSY);

        if (command->flags & MMC_RSP_136) {
                command->resp[0] = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_3);
                command->resp[1] = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_2);
                command->resp[2] = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_1);
                command->resp[3] = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_0);
        } else {
                command->resp[0] = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_0);
        }

        command->error = MMC_ERR_NONE;

        a10_mmc_finalize_request(sc);
}

/* An error occurred when the current request was being serviced. */
static void
a10_mmc_request_error(struct a10_mmc_softc *sc)
{
        struct mmc_command *command = sc->mmc_request->cmd;

        device_printf(sc->device, "Error in request.\n");

        command->error = MMC_ERR_TIMEOUT;

        if (command->data != NULL) {
                device_printf(sc->device, "Data error, sending manual stop.\n");
                a10_mmc_send_manual_stop(sc, sc->mmc_request);
        }

        if (sc->mmc_request->stop != NULL) {
                sc->mmc_request->stop->error = MMC_ERR_TIMEOUT;
        }

        a10_mmc_finalize_request(sc);
}

/* Finish handling a request. */
static void
a10_mmc_finalize_request(struct a10_mmc_softc *sc)
{
        struct mmc_request *request = sc->mmc_request;

        a10_mmc_write_4(sc, A10_MMC_GLOBAL_CONTROL_REG,
            a10_mmc_read_4(sc, A10_MMC_GLOBAL_CONTROL_REG) |
            A10_MMC_FIFO_RESET);

        sc->mmc_request = NULL;
        request->done(request);
        a10_mmc_clear_interrupts(sc);
}

/* This host relies on manual stop commands being sent on a data transfer error. */
static void
a10_mmc_send_manual_stop(struct a10_mmc_softc *sc, struct mmc_request *request)
{
        uint32_t response;
        uint32_t raw_isr;
        uint32_t command_reg_value = A10_MMC_START | A10_MMC_RESPONSE_EXPECTED |
            A10_MMC_STOP_ABORT_COMMAND | A10_MMC_CHECK_RESPONSE_CRC |
            MMC_STOP_TRANSMISSION;

        a10_mmc_clear_interrupts(sc);

        a10_mmc_write_4(sc, A10_MMC_ARGUMENT_REG, 0);
        a10_mmc_write_4(sc, A10_MMC_COMMAND_REG, command_reg_value);

        do {
                raw_isr = a10_mmc_read_4(sc, A10_MMC_RAW_INTERRUPT_STATUS_REG);
        } while (!(raw_isr &
            (A10_MMC_COMMAND_DONE | A10_MMC_INTERRUPT_ERROR_BITS)));

        if (!(raw_isr & A10_MMC_COMMAND_DONE) || (raw_isr & A10_MMC_INTERRUPT_ERROR_BITS)) {
                device_printf(sc->device, "Manual stop failed.\n");
                response = MMC_ERR_TIMEOUT;
        } else {
                response = a10_mmc_read_4(sc, A10_MMC_RESPONSE_REG_0);
        }

        if (request->stop != NULL) {
                request->stop->resp[0] = response;
        }
}

/* Return the current values of this device's instance variables. */
static int
a10_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
        struct a10_mmc_softc *sc = device_get_softc(bus);
        int error = 0;

        switch (which) {
        case MMCBR_IVAR_BUS_MODE:
                *(int *)result = sc->mmc_host.ios.bus_mode;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                *(int *)result = sc->mmc_host.ios.bus_width;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                *(int *)result = sc->mmc_host.ios.chip_select;
                break;
        case MMCBR_IVAR_CLOCK:
                *(int *)result = sc->mmc_host.ios.clock;
                break;
        case MMCBR_IVAR_F_MIN:
                *(int *)result = sc->mmc_host.f_min;
                break;
        case MMCBR_IVAR_F_MAX:
                *(int *)result = sc->mmc_host.f_max;
                break;
        case MMCBR_IVAR_HOST_OCR:
                *(int *)result = sc->mmc_host.host_ocr;
                break;
        case MMCBR_IVAR_MODE:
                *(int *)result = sc->mmc_host.mode;
                break;
        case MMCBR_IVAR_OCR:
                *(int *)result = sc->mmc_host.ocr;
                break;
        case MMCBR_IVAR_POWER_MODE:
                *(int *)result = sc->mmc_host.ios.power_mode;
                break;
        case MMCBR_IVAR_VDD:
                *(int *)result = sc->mmc_host.ios.vdd;
                break;
        case MMCBR_IVAR_CAPS:
                *(int *)result = sc->mmc_host.caps;
                break;
        case MMCBR_IVAR_MAX_DATA:
                *(int *)result = 8192; /* This indicates we can handle multiblock transfers. */
                break;
        default:
                error = EINVAL;
        }

        return (error);
}

/* Set the current values of this device's instance variables. */
static int
a10_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
        struct a10_mmc_softc *sc = device_get_softc(bus);
        int result = 0;

        switch (which) {
        case MMCBR_IVAR_BUS_MODE:
                sc->mmc_host.ios.bus_mode = value;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                sc->mmc_host.ios.bus_width = value;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                sc->mmc_host.ios.chip_select = value;
                break;
        case MMCBR_IVAR_CLOCK:
                sc->mmc_host.ios.clock = value;
                break;
        case MMCBR_IVAR_MODE:
                sc->mmc_host.mode = value;
                break;
        case MMCBR_IVAR_OCR:
                sc->mmc_host.ocr = value;
                break;
        case MMCBR_IVAR_POWER_MODE:
                sc->mmc_host.ios.power_mode = value;
                break;
        case MMCBR_IVAR_VDD:
                sc->mmc_host.ios.vdd = value;
                break;
        /* These are read-only. */
        case MMCBR_IVAR_CAPS:
        case MMCBR_IVAR_HOST_OCR:
        case MMCBR_IVAR_F_MIN:
        case MMCBR_IVAR_F_MAX:
        case MMCBR_IVAR_MAX_DATA:
        default:
                result = EINVAL;
        }

        return (result);
}

/* Update the current state of the host. */
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
        int error = 0;
        struct a10_mmc_softc *sc = device_get_softc(bus);
        struct mmc_ios *ios = &sc->mmc_host.ios;

        /* Set the bus width. */
        a10_mmc_set_bus_width(sc, ios);

        if (ios->clock) {
                /* Disable clock. */
                error = a10_mmc_set_clock_enabled(sc, 0);

                if (!error) {
                        /* Set clock rate. */
                        error = a10_mmc_set_clock_rate(sc, ios);

                        if (!error) {
                                /* Enable clock. */
                                error = a10_mmc_set_clock_enabled(sc, 1);
                        }
                }
        }

        return (error);
}

/* Enable/disable the clock. */
static int
a10_mmc_set_clock_enabled(struct a10_mmc_softc *sc, int enabled)
{
        int error = 0;
        uint32_t clock_control_reg_value = a10_mmc_read_4(sc, A10_MMC_CLOCK_CONTROL_REG);

        if (enabled) {
                clock_control_reg_value |= A10_MMC_CARD_CLOCK_ON;
        } else {
                clock_control_reg_value &= ~A10_MMC_CARD_CLOCK_ON;
        }

        a10_mmc_write_4(sc, A10_MMC_CLOCK_CONTROL_REG, clock_control_reg_value);

        error = a10_mmc_update_clock(sc);

        return (error);
}

/* Set the clock rate. */
static int
a10_mmc_set_clock_rate(struct a10_mmc_softc *sc, struct mmc_ios *ios)
{
        int error = 0;
        uint32_t clock_divider = 0;
        uint32_t clock_control_reg_value = a10_mmc_read_4(sc, A10_MMC_CLOCK_CONTROL_REG);

        clock_divider = (sc->mod_clock + (ios->clock >> 1));
        clock_divider /= ios->clock;
        clock_divider /= 2;

        /* Change divider. */
        clock_control_reg_value &= ~(0xFF);
        clock_control_reg_value |= clock_divider;
        a10_mmc_write_4(sc, A10_MMC_CLOCK_CONTROL_REG, clock_control_reg_value);

        error = a10_mmc_update_clock(sc);

        return (error);
}

/* Issue a command which will update the clock registers. */
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc)
{
        int error = 0;
        uint32_t time_left = 0xFFFF;
        uint32_t raw_isr;
        uint32_t command_reg_value = A10_MMC_START |
            A10_MMC_UPDATE_CLOCK_REGS_ONLY |
            A10_MMC_WAIT_PREVIOUS_DATA_OVER;

        a10_mmc_write_4(sc, A10_MMC_COMMAND_REG, command_reg_value);

        do {
                command_reg_value = a10_mmc_read_4(sc, A10_MMC_COMMAND_REG);
        } while ((command_reg_value & A10_MMC_START) && (--time_left));

        /* Updating the clock may set some ISR bits; clear them now. */
        raw_isr = a10_mmc_read_4(sc, A10_MMC_RAW_INTERRUPT_STATUS_REG);
        a10_mmc_write_4(sc, A10_MMC_RAW_INTERRUPT_STATUS_REG, raw_isr);

        if (!time_left) {
device_printf(sc->device, "Clock update timeout!"); |
819 |
                error = EIO;
        }

        return (error);
}

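/* Program the card bus width register from the requested ios settings. */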
static void
a10_mmc_set_bus_width(struct a10_mmc_softc *sc, struct mmc_ios *ios)
{
        switch (ios->bus_width) {
        case bus_width_1:
                a10_mmc_write_4(sc, A10_MMC_WIDTH_REG, A10_MMC_WIDTH1);
                break;
        case bus_width_4:
                a10_mmc_write_4(sc, A10_MMC_WIDTH_REG, A10_MMC_WIDTH4);
                break;
        case bus_width_8:
                a10_mmc_write_4(sc, A10_MMC_WIDTH_REG, A10_MMC_WIDTH8);
        }
}

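/* Write-protect detection is not implemented; always report the card as writable. */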
static int
a10_mmc_get_ro(device_t bus, device_t child)
{
        return (0);
}

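/* Serialize access to the host: sleep until the bus is free, then mark it busy. */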
static int
a10_mmc_acquire_host(device_t bus, device_t child)
{
        struct a10_mmc_softc *sc = device_get_softc(bus);
        int error = 0;

        a10_mmc_lock(sc);

        while (sc->bus_busy) {
                error = mtx_sleep(sc, &sc->mutex, PZERO, "a10_mmc", 0);
        }

        sc->bus_busy++;
        a10_mmc_unlock(sc);

        return (error);
}

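/* Release the bus and wake up any thread waiting to acquire it. */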
static int
a10_mmc_release_host(device_t bus, device_t child)
{
        struct a10_mmc_softc *sc = device_get_softc(bus);

        a10_mmc_lock(sc);
        sc->bus_busy--;
        wakeup(sc);
        a10_mmc_unlock(sc);

        return (0);
}

static device_method_t a10_mmc_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe, a10_mmc_probe),
        DEVMETHOD(device_attach, a10_mmc_attach),
        DEVMETHOD(device_detach, a10_mmc_detach),

        /* Bus interface. */
        DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
        DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
        DEVMETHOD(bus_print_child, bus_generic_print_child),

        /* MMC bridge interface. */
        DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
        DEVMETHOD(mmcbr_request, a10_mmc_request),
        DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
        DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
        DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),

        { 0, 0 }
};

static devclass_t a10_mmc_devclass;

static driver_t a10_mmc_driver = {
        "a10_mmc",
        a10_mmc_methods,
        sizeof(struct a10_mmc_softc)
};

DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);