/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

/* Maps the low three bits of a page register port to the channel it
   belongs to (-1: no channel).  On a PC, ports 0x87/0x83/0x81/0x82
   hold the page bytes of channels 0..3 respectively.  */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}
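/*
 * The page and (EISA-style) high page registers supply bits 16-23 and
 * 24-30 of the physical address; the controller itself only counts
 * through the low 16 bits.  Illustrative composition, mirroring the
 * expression used by DMA_read_memory/DMA_write_memory below:
 *
 *   addr = ((pageh & 0x7f) << 24) | (page << 16) | now[ADDR];
 *
 * e.g. pageh = 0x00, page = 0x12, now[ADDR] = 0x3456 yields the
 * physical address 0x123456.
 */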
static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

/* Return the current flip-flop state and toggle it.  The flip-flop
   selects which half of a 16 bit register an 8 bit access hits.  */
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
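/*
 * A 16 bit channel register is programmed with two consecutive 8 bit
 * writes, low byte first; the flip-flop (see getff/write_chan above)
 * remembers which half comes next.  For example, a guest would set the
 * base address of channel 1 on the first controller to 0x1234 with:
 *
 *   out 0x0c, al    ; any write clears the flip-flop
 *   out 0x02, 0x34  ; low byte of channel 1 base address
 *   out 0x02, 0x12  ; high byte; write_chan then calls init_chan
 */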
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    /* Every channel gets dma_phony_handler at init time, but be
       defensive in case this runs before dma_init2 has finished.  */
    if (r->transfer_handler) {
        n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                                 r->now[COUNT], (r->base[COUNT] + 1) << ncont);
        r->now[COUNT] = n;
        ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
    }
}

static QEMUBH *dma_bh;

static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* Address decrement mode: present the bytes to the device in
           descending address order by reversing the buffer in place.
           What about 16bit transfers?  */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}
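/*
 * A device's transfer handler (see DMA_register_channel above) is
 * called with the current position and total transfer size, moves some
 * bytes via DMA_read_memory/DMA_write_memory and returns the new
 * position.  A minimal sketch, assuming a hypothetical device with a
 * "fifo" scratch buffer (MyDevice and my_transfer_handler are
 * illustrative names, not part of this file):
 *
 *   static int my_transfer_handler (void *opaque, int nchan,
 *                                   int dma_pos, int dma_len)
 *   {
 *       MyDevice *s = opaque;
 *       int to_copy = dma_len - dma_pos;
 *
 *       if (to_copy > (int) sizeof (s->fifo))
 *           to_copy = sizeof (s->fifo);
 *       DMA_read_memory (nchan, s->fifo, dma_pos, to_copy);
 *       if (dma_pos + to_copy >= dma_len)
 *           DMA_release_DREQ (nchan);
 *       return dma_pos + to_copy;
 *   }
 */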
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* Address decrement mode: reverse the buffer in place first so
           that memory receives the bytes in descending address order.
           What about 16bit transfers?  */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    CPUState *env = cpu_single_env;
    if (env)
        cpu_exit(env);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, 0, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}

static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift = qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        r->now[0] = qemu_get_be32 (f);
        r->now[1] = qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    DMA_run();

    return 0;
}
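/*
 * Resulting PC port map (dma_init2 spaces the second controller's
 * registers two bytes apart via dshift = 1):
 *
 *   DMA1 (8 bit,  channels 0-3): 0x00-0x07 chan regs, 0x08-0x0f control,
 *                                page regs at 0x81/0x82/0x83/0x87
 *   DMA2 (16 bit, channels 4-7): 0xc0-0xce chan regs, 0xd0-0xde control,
 *                                page regs at 0x89/0x8a/0x8b/0x8f
 *
 * With high_page_enable set, the EISA high page registers appear at
 * the same offsets from 0x480 and 0x488.
 */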
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}
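/*
 * Typical call sequence, as a sketch (my_transfer_handler is the
 * illustrative handler named earlier, not part of this file):
 *
 *   DMA_init (0);                                       machine init
 *   DMA_register_channel (2, my_transfer_handler, s);   device init
 *   DMA_hold_DREQ (2);                                  start a transfer
 *     ...the handler then runs from the DMA bottom half...
 *   DMA_release_DREQ (2);                               transfer done
 */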