/*=======================================================================================*/
/* This Sail RISC-V architecture model, comprising all files and */
/* directories except where otherwise noted is subject to the BSD */
/* two-clause license in the LICENSE file. */
/* */
/* SPDX-License-Identifier: BSD-2-Clause */
/*=======================================================================================*/

// Cache Block Operations - Management

enum clause extension = Ext_Zicbom
function clause extensionEnabled(Ext_Zicbom) = sys_enable_zicbom()

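// These helpers check the xenvcfg controls that gate cache-block management
// instructions at lower privilege levels: the CBCFE bit enables cbo.clean and
// cbo.flush, while the two-bit CBIE field governs cbo.inval (illegal, executed
// as a flush, or executed as an invalidate).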
function cbo_clean_flush_enabled(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBCFE][0], senvcfg[CBCFE][0])
function cbo_inval_enabled(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBIE][0], senvcfg[CBIE][0])
function cbo_inval_as_inval(p : Privilege) -> bool = feature_enabled_for_priv(p, menvcfg[CBIE][1], senvcfg[CBIE][1])

/* ****************************************************************** */
union clause ast = RISCV_ZICBOM : (cbop_zicbom, regidx)

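// The operation is selected by the 12-bit field in instruction bits [31:20];
// the remaining fields (rs1, funct3 = 0b010, rd = 0b00000, opcode = 0b0001111,
// i.e. MISC-MEM) are fixed by the encdec clause below.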
mapping encdec_cbop : cbop_zicbom <-> bits(12) = {
  CBO_CLEAN <-> 0b000000000001,
  CBO_FLUSH <-> 0b000000000010,
  CBO_INVAL <-> 0b000000000000,
}

mapping clause encdec = RISCV_ZICBOM(cbop, rs1) if extensionEnabled(Ext_Zicbom)
  <-> encdec_cbop(cbop) @ rs1 @ 0b010 @ 0b00000 @ 0b0001111 if extensionEnabled(Ext_Zicbom)

mapping cbop_mnemonic : cbop_zicbom <-> string = {
  CBO_CLEAN <-> "cbo.clean",
  CBO_FLUSH <-> "cbo.flush",
  CBO_INVAL <-> "cbo.inval"
}

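// The assembly syntax takes the address register in parentheses,
// e.g. "cbo.clean (a0)" cleans the cache block containing the address in a0.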
mapping clause assembly = RISCV_ZICBOM(cbop, rs1)
  <-> cbop_mnemonic(cbop) ^ spc() ^ "(" ^ opt_spc() ^ reg_name(rs1) ^ opt_spc() ^ ")"

val process_clean_inval : (regidx, cbop_zicbom) -> Retired
function process_clean_inval(rs1, cbop) = {
  let rs1_val = X(rs1);
  let cache_block_size_exp = plat_cache_block_size_exp();
  let cache_block_size = 2 ^ cache_block_size_exp;
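  // The block size is always a power of two, e.g. a cache_block_size_exp of 6
  // gives the common 64-byte cache block.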

  // Offset from rs1 to the beginning of the cache block. This is 0 if rs1
  // is aligned to the cache block, or negative if rs1 is misaligned.
  let offset = (rs1_val & ~(zero_extend(ones(cache_block_size_exp)))) - rs1_val;
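  // For example, with 64-byte blocks a misaligned rs1 of 0x80000043 yields an
  // offset of -3, so the address checked below is the block base 0x80000040.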

  // TODO: This is incorrect since CHERI only requires at least one byte
  // to be in bounds here, whereas `ext_data_get_addr()` checks that all bytes
  // are in bounds. We will need to add a new function, parameter or access type.
  match ext_data_get_addr(rs1, offset, Read(Data), cache_block_size) {
    Ext_DataAddr_Error(e) => { ext_handle_data_check_error(e); RETIRE_FAIL },
    Ext_DataAddr_OK(vaddr) => {
      let res: option(ExceptionType) = match translateAddr(vaddr, Read(Data)) {
        TR_Address(paddr, _) => {
          // "A cache-block management instruction is permitted to access the
          // specified cache block whenever a load instruction or store instruction
          // is permitted to access the corresponding physical addresses. If
          // neither a load instruction nor store instruction is permitted to
          // access the physical addresses, but an instruction fetch is permitted
          // to access the physical addresses, whether a cache-block management
          // instruction is permitted to access the cache block is UNSPECIFIED."
          //
          // In this implementation we currently don't allow access for fetches.
          let exc_read = phys_access_check(Read(Data), cur_privilege, paddr, cache_block_size);
          let exc_write = phys_access_check(Write(Data), cur_privilege, paddr, cache_block_size);
          match (exc_read, exc_write) {
            // Access is permitted if read OR write are allowed. If neither
            // are allowed then we always report a store exception.
            (Some(exc_read), Some(exc_write)) => Some(exc_write),
            _ => None(),
          }
        },
        TR_Failure(e, _) => Some(e)
      };
      // "If access to the cache block is not permitted, a cache-block management
      // instruction raises a store page fault or store guest-page fault exception
      // if address translation does not permit any access or raises a store access
      // fault exception otherwise."
      match res {
        // The model has no caches so there's no action required.
        None() => RETIRE_SUCCESS,
        Some(e) => {
          let e : ExceptionType = match e {
            E_Load_Access_Fault() => E_SAMO_Access_Fault(),
            E_SAMO_Access_Fault() => E_SAMO_Access_Fault(),
            E_Load_Page_Fault() => E_SAMO_Page_Fault(),
            E_SAMO_Page_Fault() => E_SAMO_Page_Fault(),
            // No other exceptions should be generated since we're not checking
            // for fetch access and it can't be misaligned.
            _ => internal_error(__FILE__, __LINE__, "unexpected exception for cmo.clean/inval"),
          };
          handle_mem_exception(vaddr, e);
          RETIRE_FAIL
        }
      }
    }
  }
}

function clause execute(RISCV_ZICBOM(cbop, rs1)) =
  match cbop {
    CBO_CLEAN if cbo_clean_flush_enabled(cur_privilege) =>
      process_clean_inval(rs1, cbop),
    CBO_FLUSH if cbo_clean_flush_enabled(cur_privilege) =>
      process_clean_inval(rs1, cbop),
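    // If the CBIE control only permits cbo.inval to behave as a flush, the
    // operation is downgraded to CBO_FLUSH before being processed.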
    CBO_INVAL if cbo_inval_enabled(cur_privilege) =>
      process_clean_inval(rs1, if cbo_inval_as_inval(cur_privilege) then CBO_INVAL else CBO_FLUSH),
    _ => {
      handle_illegal();
      RETIRE_FAIL
    },
  }