// -*- mode:c++ -*-

// Copyright (c) 2015 RISC-V Foundation
// Copyright (c) 2017 The University of Virginia
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are

--- 498 unchanged lines hidden ---

                0x2: LoadReserved::lr_w({{
                    Rd_sd = Mem_sw;
                }}, mem_flags=LLSC);
                0x3: StoreCond::sc_w({{
                    Mem_uw = Rs2_uw;
                }}, {{
                    Rd = result;
                }}, inst_flags=IsStoreConditional, mem_flags=LLSC);
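                // Each AtomicMemOp entry below pairs two code blocks: the
                // first writes the (sign-extended) value loaded from memory
                // into Rd, and the second builds the TypedAtomicOpFunctor /
                // AtomicGenericOp functor that performs the read-modify-write
                // on the memory location. The ATOMIC_RETURN_OP memory flag
                // marks the access as an atomic that returns the original
                // memory value.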
                0x0: AtomicMemOp::amoadd_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<int32_t> *amo_op =
                        new AtomicGenericOp<int32_t>(Rs2_sw,
                            [](int32_t* b, int32_t a){ *b += a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x1: AtomicMemOp::amoswap_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x4: AtomicMemOp::amoxor_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ *b ^= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x8: AtomicMemOp::amoor_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ *b |= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0xc: AtomicMemOp::amoand_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ *b &= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x10: AtomicMemOp::amomin_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<int32_t> *amo_op =
                        new AtomicGenericOp<int32_t>(Rs2_sw,
                            [](int32_t* b, int32_t a){ if (a < *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x14: AtomicMemOp::amomax_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<int32_t> *amo_op =
                        new AtomicGenericOp<int32_t>(Rs2_sw,
                            [](int32_t* b, int32_t a){ if (a > *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x18: AtomicMemOp::amominu_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ if (a < *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x1c: AtomicMemOp::amomaxu_w({{
                    Rd_sd = Mem_sw;
                }}, {{
                    TypedAtomicOpFunctor<uint32_t> *amo_op =
                        new AtomicGenericOp<uint32_t>(Rs2_uw,
                            [](uint32_t* b, uint32_t a){ if (a > *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
            }
            0x3: decode AMOFUNCT {
                0x2: LoadReserved::lr_d({{
                    Rd_sd = Mem_sd;
                }}, mem_flags=LLSC);
                0x3: StoreCond::sc_d({{
                    Mem = Rs2;
                }}, {{
                    Rd = result;
                }}, mem_flags=LLSC, inst_flags=IsStoreConditional);
                0x0: AtomicMemOp::amoadd_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<int64_t> *amo_op =
                        new AtomicGenericOp<int64_t>(Rs2_sd,
                            [](int64_t* b, int64_t a){ *b += a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x1: AtomicMemOp::amoswap_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x4: AtomicMemOp::amoxor_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ *b ^= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x8: AtomicMemOp::amoor_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ *b |= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0xc: AtomicMemOp::amoand_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ *b &= a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x10: AtomicMemOp::amomin_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<int64_t> *amo_op =
                        new AtomicGenericOp<int64_t>(Rs2_sd,
                            [](int64_t* b, int64_t a){ if (a < *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x14: AtomicMemOp::amomax_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<int64_t> *amo_op =
                        new AtomicGenericOp<int64_t>(Rs2_sd,
                            [](int64_t* b, int64_t a){ if (a > *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x18: AtomicMemOp::amominu_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ if (a < *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
                0x1c: AtomicMemOp::amomaxu_d({{
                    Rd_sd = Mem_sd;
                }}, {{
                    TypedAtomicOpFunctor<uint64_t> *amo_op =
                        new AtomicGenericOp<uint64_t>(Rs2_ud,
                            [](uint64_t* b, uint64_t a){ if (a > *b) *b = a; });
                }}, mem_flags=ATOMIC_RETURN_OP);
            }
        }
        0x0c: decode FUNCT3 {
            format ROp {
                0x0: decode FUNCT7 {
                    0x0: add({{
                        Rd = Rs1_sd + Rs2_sd;
                    }});

--- 1177 unchanged lines hidden ---