// -*- mode:c++ -*-

// Copyright (c) 2011-2014, 2017 ARM Limited
// All rights reserved
//
// The license below extends only to copyright in the software and shall
// not be construed as granting a license to any other intellectual
// property including but not limited to intellectual property relating
// to a hardware implementation of the functionality of the software
// licensed hereunder.  You may use the software subject to the license
// terms below provided that you ensure that this notice is replicated
// unmodified and in its entirety in all distributions of the software,
// modified or unmodified, in source code or in binary form.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met: redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer;
// redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution;
// neither the name of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Authors: Gabe Black

let {{

    header_output = ""
    decoder_output = ""
    exec_output = ""

    class LoadInst64(LoadStoreInst):
        execBase = 'Load64'
        micro = False

        def __init__(self, mnem, Name, size=4, sign=False, user=False,
                     literal=False, flavor="normal", top=False):
            super(LoadInst64, self).__init__()

            self.name = mnem
            self.Name = Name
            self.size = size
            self.sign = sign
            self.user = user
            self.literal = literal
            self.flavor = flavor
            self.top = top

            self.memFlags = ["ArmISA::TLB::MustBeOne"]
            self.instFlags = []
            self.codeBlobs = {"postacc_code" : ""}

            # Add memory request flags where necessary
            if self.user:
                self.memFlags.append("ArmISA::TLB::UserMode")

            if self.flavor == "dprefetch":
                self.memFlags.append("Request::PREFETCH")
                self.instFlags = ['IsDataPrefetch']
            elif self.flavor == "iprefetch":
                self.memFlags.append("Request::PREFETCH")
                self.instFlags = ['IsInstPrefetch']
            elif self.flavor == "mprefetch":
                self.memFlags.append("((((dest>>3)&3)==2)? \
                     (Request::PF_EXCLUSIVE):(Request::PREFETCH))")
                self.instFlags = ['IsDataPrefetch']
            if self.micro:
                self.instFlags.append("IsMicroop")
            if self.flavor in ("acexp", "exp"):
                # For exclusive pair ops, the alignment check is based on
                # the total size of the pair.
                self.memFlags.append("%d" % int(math.log(self.size, 2) + 1))
            elif not (self.size == 16 and self.top):
                # Only the first microop should perform alignment checking.
                self.memFlags.append("%d" % int(math.log(self.size, 2)))

            if self.flavor not in ("acquire", "acex", "exclusive",
                                   "acexp", "exp"):
                self.memFlags.append("ArmISA::TLB::AllowUnaligned")

            if self.flavor in ("acquire", "acex", "acexp"):
                self.instFlags.extend(["IsMemBarrier",
                                       "IsWriteBarrier",
                                       "IsReadBarrier"])
            if self.flavor in ("acex", "exclusive", "exp", "acexp"):
                self.memFlags.append("Request::LLSC")
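            # Worked illustration (comment only, nothing executed here):
            # for an 8-byte LDXR (flavor "exclusive") the flags built above
            # come out as
            #     ["ArmISA::TLB::MustBeOne", "3", "Request::LLSC"]
            # i.e. a log2(8) == 3 alignment check plus LL/SC semantics and
            # no AllowUnaligned. An 8-byte LDXP (flavor "exp") carries "4"
            # instead, checking the pair against its 16-byte total size.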

            # Use a different execute template for fp flavoured loads. In
            # that template the memacc_code is executed conditionally,
            # depending on whether the memory load has generated a fault.
            if flavor == "fp":
                self.fullExecTemplate = eval(self.execBase + 'FpExecute')

        def buildEACode(self):
            # Address computation code
            eaCode = ""
            if self.flavor == "fp":
                eaCode += vfp64EnabledCheckCode

            if self.literal:
                eaCode += "EA = RawPC"
            else:
                eaCode += SPAlignmentCheckCode + "EA = XBase"

            if self.size == 16:
                if self.top:
                    eaCode += " + (isBigEndian64(xc->tcBase()) ? 0 : 8)"
                else:
                    eaCode += " + (isBigEndian64(xc->tcBase()) ? 8 : 0)"
            if not self.post:
                eaCode += self.offset
            eaCode += ";"

            self.codeBlobs["ea_code"] = eaCode
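            # Illustration: for a plain LDRX64_IMM this builds roughly
            #     "<SP alignment check>EA = XBase + imm;"
            # while each half of a 16-byte access also adds the
            # endianness-dependent 0/8 byte offset above, so the two
            # microops read adjacent doublewords.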

        def emitHelper(self, base='Memory64', wbDecl=None):
            global header_output, decoder_output, exec_output

            # If this is a microop itself, don't allow anything that would
            # require further microcoding.
            if self.micro:
                assert not wbDecl

            fa_code = None
            if not self.micro and self.flavor in ("normal", "widen", "acquire"):
                fa_code = '''
                    fault->annotate(ArmFault::SAS, %s);
                    fault->annotate(ArmFault::SSE, %s);
                    fault->annotate(ArmFault::SRT, dest);
                    fault->annotate(ArmFault::SF, %s);
                    fault->annotate(ArmFault::AR, %s);
                ''' % ("0" if self.size == 1 else
                       "1" if self.size == 2 else
                       "2" if self.size == 4 else "3",
                       "true" if self.sign else "false",
                       "true" if (self.size == 8 or
                                  self.flavor == "widen") else "false",
                       "true" if self.flavor == "acquire" else "false")
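            # E.g. LDRSHX64 (size == 2, signed, flavor "widen") annotates
            # SAS = 1, SSE = true, SRT = dest, SF = true and AR = false,
            # letting a data abort report the access width, sign extension
            # and register width in the fault syndrome.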

            (newHeader, newDecoder, newExec) = \
                self.fillTemplates(self.name, self.Name, self.codeBlobs,
                                   self.memFlags, self.instFlags,
                                   base, wbDecl, faCode=fa_code)

            header_output += newHeader
            decoder_output += newDecoder
            exec_output += newExec

    class LoadImmInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadImmInst64, self).__init__(*args, **kargs)
            self.offset = " + imm"

            self.wbDecl = "MicroAddXiUop(machInst, base, base, imm);"
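            # Only the pre/post-indexed variants consume this declaration
            # (see the writeback check in LoadSingle64.emit below): the
            # extra microop folds the immediate back into the base register.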

    class LoadRegInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadRegInst64, self).__init__(*args, **kargs)
            self.offset = " + extendReg64(XOffset, type, shiftAmt, 64)"

            self.wbDecl = \
                "MicroAddXERegUop(machInst, base, base, " + \
                "                 offset, type, shiftAmt);"

    class LoadRawRegInst64(LoadInst64):
        def __init__(self, *args, **kargs):
            super(LoadRawRegInst64, self).__init__(*args, **kargs)
            self.offset = ""

    class LoadSingle64(LoadInst64):
        def emit(self):
            self.buildEACode()

            accEpilogCode = None
            # Code that actually handles the access
            if self.flavor in ("dprefetch", "iprefetch", "mprefetch"):
                accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
            elif self.flavor == "fp":
                accEpilogCode = '''
                    ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest,
                        ArmStaticInst::getCurSveVecLen<uint64_t>(
                            xc->tcBase()));
                '''
                if self.size in (1, 2, 4):
                    accCode = '''
                        AA64FpDestP0_uw = cSwap(Mem%s,
                                                isBigEndian64(xc->tcBase()));
                        AA64FpDestP1_uw = 0;
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                    '''
                elif self.size == 8:
                    accCode = '''
                        uint64_t data = cSwap(Mem%s,
                                              isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = (uint32_t)data;
                        AA64FpDestP1_uw = (data >> 32);
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                    '''
                elif self.size == 16:
                    accCode = '''
                    auto data = cSwap(Mem%s, isBigEndian64(xc->tcBase()));
                    AA64FpDestP0_uw = (uint32_t)data[0];
                    AA64FpDestP1_uw = (data[0] >> 32);
                    AA64FpDestP2_uw = (uint32_t)data[1];
                    AA64FpDestP3_uw = (data[1] >> 32);
                    '''
            elif self.flavor == "widen" or self.size == 8:
                accCode = "XDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"
            else:
                accCode = "WDest = cSwap(Mem%s, isBigEndian64(xc->tcBase()));"

            accCode = accCode % buildMemSuffix(self.sign, self.size)
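            # Illustration: for an unsigned 2-byte LDRH, buildMemSuffix()
            # (shared with the other memory instruction definitions) should
            # yield "_uh", so the line above expands accCode to
            #     WDest = cSwap(Mem_uh, isBigEndian64(xc->tcBase()));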

            self.codeBlobs["memacc_code"] = accCode
            if accEpilogCode:
                self.codeBlobs["memacc_epilog_code"] = accEpilogCode

            # Push it out to the output files
            wbDecl = None
            if self.writeback and not self.micro:
                wbDecl = self.wbDecl
            self.emitHelper(self.base, wbDecl)

    class LoadDouble64(LoadInst64):
        def emit(self):
            self.buildEACode()

            accEpilogCode = None
            # Code that actually handles the access
            if self.flavor == "fp":
                accEpilogCode = '''
                    ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest,
                        ArmStaticInst::getCurSveVecLen<uint64_t>(
                            xc->tcBase()));
                    ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest2,
                        ArmStaticInst::getCurSveVecLen<uint64_t>(
                            xc->tcBase()));
                '''
                if self.size == 4:
                    accCode = '''
                        uint64_t data = cSwap(Mem_ud,
                                              isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = isBigEndian64(xc->tcBase())
                                            ? (data >> 32)
                                            : (uint32_t)data;
                        AA64FpDestP1_uw = 0;
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                        AA64FpDest2P0_uw = isBigEndian64(xc->tcBase())
                                            ? (uint32_t)data
                                            : (data >> 32);
                        AA64FpDest2P1_uw = 0;
                        AA64FpDest2P2_uw = 0;
                        AA64FpDest2P3_uw = 0;
                    '''
                elif self.size == 8:
                    accCode = '''
                        uint64_t data_a = cSwap(Mem_tud[0],
                                                isBigEndian64(xc->tcBase()));
                        uint64_t data_b = cSwap(Mem_tud[1],
                                                isBigEndian64(xc->tcBase()));
                        AA64FpDestP0_uw = (uint32_t)data_a;
                        AA64FpDestP1_uw = (uint32_t)(data_a >> 32);
                        AA64FpDestP2_uw = 0;
                        AA64FpDestP3_uw = 0;
                        AA64FpDest2P0_uw = (uint32_t)data_b;
                        AA64FpDest2P1_uw = (uint32_t)(data_b >> 32);
                        AA64FpDest2P2_uw = 0;
                        AA64FpDest2P3_uw = 0;
                    '''
            else:
                if self.sign:
                    if self.size == 4:
                        accCode = '''
                            uint64_t data = cSwap(Mem_ud,
                                                  isBigEndian64(xc->tcBase()));
                            XDest = isBigEndian64(xc->tcBase())
                                    ? sext<32>(data >> 32)
                                    : sext<32>((uint32_t)data);
                            XDest2 = isBigEndian64(xc->tcBase())
                                     ? sext<32>((uint32_t)data)
                                     : sext<32>(data >> 32);
                        '''
                    elif self.size == 8:
                        accCode = '''
                            XDest = cSwap(Mem_tud[0],
                                          isBigEndian64(xc->tcBase()));
                            XDest2 = cSwap(Mem_tud[1],
                                           isBigEndian64(xc->tcBase()));
                        '''
                else:
                    if self.size == 4:
                        accCode = '''
                            uint64_t data = cSwap(Mem_ud,
                                                  isBigEndian64(xc->tcBase()));
                            XDest = isBigEndian64(xc->tcBase())
                                    ? (data >> 32)
                                    : (uint32_t)data;
                            XDest2 = isBigEndian64(xc->tcBase())
                                    ? (uint32_t)data
                                    : (data >> 32);
                        '''
                    elif self.size == 8:
                        accCode = '''
                            XDest = cSwap(Mem_tud[0],
                                          isBigEndian64(xc->tcBase()));
                            XDest2 = cSwap(Mem_tud[1],
                                           isBigEndian64(xc->tcBase()));
                        '''
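            # Illustration: a 4-byte pair load above performs one 8-byte
            # access and splits it between the two destination registers
            # (which half lands in which register depends on endianness),
            # while the 8-byte forms read both doublewords through the
            # two-element Mem_tud operand.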
            self.codeBlobs["memacc_code"] = accCode
            if accEpilogCode:
                self.codeBlobs["memacc_epilog_code"] = accEpilogCode

            # Push it out to the output files
            wbDecl = None
            if self.writeback and not self.micro:
                wbDecl = self.wbDecl
            self.emitHelper(self.base, wbDecl)

    class LoadImm64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryImm64'
        writeback = False
        post = False

    class LoadPre64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryPreIndex64'
        writeback = True
        post = False

    class LoadPost64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreImm64'
        base = 'ArmISA::MemoryPostIndex64'
        writeback = True
        post = True

    class LoadReg64(LoadRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreReg64'
        base = 'ArmISA::MemoryReg64'
        writeback = False
        post = False

    class LoadRaw64(LoadRawRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreRaw64'
        base = 'ArmISA::MemoryRaw64'
        writeback = False
        post = False

    class LoadEx64(LoadRawRegInst64, LoadSingle64):
        decConstBase = 'LoadStoreEx64'
        base = 'ArmISA::MemoryEx64'
        writeback = False
        post = False

    class LoadLit64(LoadImmInst64, LoadSingle64):
        decConstBase = 'LoadStoreLit64'
        base = 'ArmISA::MemoryLiteral64'
        writeback = False
        post = False

    def buildLoads64(mnem, NameBase, size, sign, flavor="normal"):
        LoadImm64(mnem, NameBase + "_IMM", size, sign, flavor=flavor).emit()
        LoadPre64(mnem, NameBase + "_PRE", size, sign, flavor=flavor).emit()
        LoadPost64(mnem, NameBase + "_POST", size, sign, flavor=flavor).emit()
        LoadReg64(mnem, NameBase + "_REG", size, sign, flavor=flavor).emit()

    buildLoads64("ldrb", "LDRB64", 1, False)
    buildLoads64("ldrsb", "LDRSBW64", 1, True)
    buildLoads64("ldrsb", "LDRSBX64", 1, True, flavor="widen")
    buildLoads64("ldrh", "LDRH64", 2, False)
    buildLoads64("ldrsh", "LDRSHW64", 2, True)
    buildLoads64("ldrsh", "LDRSHX64", 2, True, flavor="widen")
    buildLoads64("ldrsw", "LDRSW64", 4, True, flavor="widen")
    buildLoads64("ldr", "LDRW64", 4, False)
    buildLoads64("ldr", "LDRX64", 8, False)
    buildLoads64("ldr", "LDRBFP64", 1, False, flavor="fp")
    buildLoads64("ldr", "LDRHFP64", 2, False, flavor="fp")
    buildLoads64("ldr", "LDRSFP64", 4, False, flavor="fp")
    buildLoads64("ldr", "LDRDFP64", 8, False, flavor="fp")

    LoadImm64("prfm", "PRFM64_IMM", 8, flavor="mprefetch").emit()
    LoadReg64("prfm", "PRFM64_REG", 8, flavor="mprefetch").emit()
    LoadLit64("prfm", "PRFM64_LIT", 8, literal=True,
              flavor="mprefetch").emit()
    LoadImm64("prfum", "PRFUM64_IMM", 8, flavor="mprefetch").emit()

    LoadImm64("ldurb", "LDURB64_IMM", 1, False).emit()
    LoadImm64("ldursb", "LDURSBW64_IMM", 1, True).emit()
    LoadImm64("ldursb", "LDURSBX64_IMM", 1, True, flavor="widen").emit()
    LoadImm64("ldurh", "LDURH64_IMM", 2, False).emit()
    LoadImm64("ldursh", "LDURSHW64_IMM", 2, True).emit()
    LoadImm64("ldursh", "LDURSHX64_IMM", 2, True, flavor="widen").emit()
    LoadImm64("ldursw", "LDURSW64_IMM", 4, True, flavor="widen").emit()
    LoadImm64("ldur", "LDURW64_IMM", 4, False).emit()
    LoadImm64("ldur", "LDURX64_IMM", 8, False).emit()
    LoadImm64("ldur", "LDURBFP64_IMM", 1, flavor="fp").emit()
    LoadImm64("ldur", "LDURHFP64_IMM", 2, flavor="fp").emit()
    LoadImm64("ldur", "LDURSFP64_IMM", 4, flavor="fp").emit()
    LoadImm64("ldur", "LDURDFP64_IMM", 8, flavor="fp").emit()

    LoadImm64("ldtrb", "LDTRB64_IMM", 1, False, True).emit()
    LoadImm64("ldtrsb", "LDTRSBW64_IMM", 1, True, True).emit()
    LoadImm64("ldtrsb", "LDTRSBX64_IMM", 1, True, True, flavor="widen").emit()
    LoadImm64("ldtrh", "LDTRH64_IMM", 2, False, True).emit()
    LoadImm64("ldtrsh", "LDTRSHW64_IMM", 2, True, True).emit()
    LoadImm64("ldtrsh", "LDTRSHX64_IMM", 2, True, True, flavor="widen").emit()
    LoadImm64("ldtrsw", "LDTRSW64_IMM", 4, True, True, flavor="widen").emit()
    LoadImm64("ldtr", "LDTRW64_IMM", 4, False, True).emit()
    LoadImm64("ldtr", "LDTRX64_IMM", 8, False, True).emit()

    LoadLit64("ldrsw", "LDRSWL64_LIT", 4, True, \
              literal=True, flavor="widen").emit()
    LoadLit64("ldr", "LDRWL64_LIT", 4, False, literal=True).emit()
    LoadLit64("ldr", "LDRXL64_LIT", 8, False, literal=True).emit()
    LoadLit64("ldr", "LDRSFP64_LIT", 4, literal=True, flavor="fp").emit()
    LoadLit64("ldr", "LDRDFP64_LIT", 8, literal=True, flavor="fp").emit()

    LoadRaw64("ldar", "LDARX64", 8, flavor="acquire").emit()
    LoadRaw64("ldar", "LDARW64", 4, flavor="acquire").emit()
    LoadRaw64("ldarh", "LDARH64", 2, flavor="acquire").emit()
    LoadRaw64("ldarb", "LDARB64", 1, flavor="acquire").emit()

    LoadEx64("ldaxr", "LDAXRX64", 8, flavor="acex").emit()
    LoadEx64("ldaxr", "LDAXRW64", 4, flavor="acex").emit()
    LoadEx64("ldaxrh", "LDAXRH64", 2, flavor="acex").emit()
    LoadEx64("ldaxrb", "LDAXRB64", 1, flavor="acex").emit()

    LoadEx64("ldxr", "LDXRX64", 8, flavor="exclusive").emit()
    LoadEx64("ldxr", "LDXRW64", 4, flavor="exclusive").emit()
    LoadEx64("ldxrh", "LDXRH64", 2, flavor="exclusive").emit()
    LoadEx64("ldxrb", "LDXRB64", 1, flavor="exclusive").emit()

    LoadRaw64("ldapr", "LDAPRX64", 8, flavor="acquire").emit()
    LoadRaw64("ldapr", "LDAPRW64", 4, flavor="acquire").emit()
    LoadRaw64("ldaprh", "LDAPRH64", 2, flavor="acquire").emit()
    LoadRaw64("ldaprb", "LDAPRB64", 1, flavor="acquire").emit()

    class LoadImmU64(LoadImm64):
        decConstBase = 'LoadStoreImmU64'
        micro = True

    class LoadImmDU64(LoadImmInst64, LoadDouble64):
        decConstBase = 'LoadStoreImmDU64'
        base = 'ArmISA::MemoryDImm64'
        micro = True
        post = False
        writeback = False

    class LoadImmDouble64(LoadImmInst64, LoadDouble64):
        decConstBase = 'LoadStoreImmDU64'
        base = 'ArmISA::MemoryDImm64'
        micro = False
        post = False
        writeback = False

    class LoadRegU64(LoadReg64):
        decConstBase = 'LoadStoreRegU64'
        micro = True

    class LoadLitU64(LoadLit64):
        decConstBase = 'LoadStoreLitU64'
        micro = True

    LoadImmDU64("ldp_uop", "MicroLdPairUop", 8).emit()
    LoadImmDU64("ldp_fp8_uop", "MicroLdPairFp8Uop", 8, flavor="fp").emit()
    LoadImmU64("ldfp16_uop", "MicroLdFp16Uop", 16, flavor="fp").emit()
    LoadReg64("ldfp16reg_uop", "MicroLdFp16RegUop", 16, flavor="fp").emit()

    LoadImmDouble64("ldaxp", "LDAXPW64", 4, flavor="acexp").emit()
    LoadImmDouble64("ldaxp", "LDAXPX64", 8, flavor="acexp").emit()
    LoadImmDouble64("ldxp", "LDXPW64", 4, flavor="exp").emit()
    LoadImmDouble64("ldxp", "LDXPX64", 8, flavor="exp").emit()

    LoadImmU64("ldrxi_uop", "MicroLdrXImmUop", 8).emit()
    LoadRegU64("ldrxr_uop", "MicroLdrXRegUop", 8).emit()
    LoadLitU64("ldrxl_uop", "MicroLdrXLitUop", 8, literal=True).emit()
    LoadImmU64("ldrfpxi_uop", "MicroLdrFpXImmUop", 8, flavor="fp").emit()
    LoadRegU64("ldrfpxr_uop", "MicroLdrFpXRegUop", 8, flavor="fp").emit()
    LoadLitU64("ldrfpxl_uop", "MicroLdrFpXLitUop", 8, literal=True,
               flavor="fp").emit()
    LoadLitU64("ldfp16_lit__uop", "MicroLdFp16LitUop",
               16, literal=True, flavor="fp").emit()
    LoadImmDU64("ldrduxi_uop", "MicroLdrDUXImmUop", 4, sign=False).emit()
    LoadImmDU64("ldrdsxi_uop", "MicroLdrDSXImmUop", 4, sign=True).emit()
    LoadImmDU64("ldrdfpxi_uop", "MicroLdrDFpXImmUop", 4, flavor="fp").emit()
}};