/*
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/Config.hh"
#include "debug/Ruby.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/system/RubyPort.hh"

RubyPort::RubyPort(const Params *p)
    : MemObject(p)
{
    m_version = p->version;
    assert(m_version != -1);

    physmem = p->physmem;

    m_controller = NULL;
    m_mandatory_q_ptr = NULL;

    m_request_cnt = 0;
    pio_port = NULL;
    physMemPort = NULL;

    m_usingRubyTester = p->using_ruby_tester;
    access_phys_mem = p->access_phys_mem;

    drainEvent = NULL;

    ruby_system = p->ruby_system;
    waitingOnSequencer = false;
}

void
RubyPort::init()
{
    assert(m_controller != NULL);
    m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}

Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
    if (if_name == "port") {
        M5Port* cpuPort = new M5Port(csprintf("%s-port%d", name(), idx),
                                     this, ruby_system, access_phys_mem);
        cpu_ports.push_back(cpuPort);
        return cpuPort;
    }

    if (if_name == "pio_port") {
        // ensure there is only one pio port
        assert(pio_port == NULL);

        pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx), this);

        return pio_port;
    }

    if (if_name == "physMemPort") {
        // RubyPort should only have one port to physical memory
        assert(physMemPort == NULL);

        physMemPort = new M5Port(csprintf("%s-physMemPort", name()), this,
                                 ruby_system, access_phys_mem);

        return physMemPort;
    }

    return NULL;
}
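
// Illustrative note (added commentary, not from the original source): callers
// obtain ports from a RubyPort by name, e.g. a hypothetical owner could do
//     Port *cpu_side = ruby_port->getPort("port", 0);         // one per CPU
//     Port *pio      = ruby_port->getPort("pio_port", 0);     // at most one
//     Port *physmem  = ruby_port->getPort("physMemPort", 0);  // at most one
// An unrecognized name returns NULL, which signals a misconfigured binding.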

RubyPort::PioPort::PioPort(const std::string &_name,
                           RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port from ruby sequencer to pio devices %s\n",
            _name);
    ruby_port = _port;
}

RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
                         RubySystem *_system, bool _access_phys_mem)
    : SimpleTimingPort(_name, _port)
{
    DPRINTF(RubyPort, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
    ruby_system = _system;
    _onRetryList = false;
    access_phys_mem = _access_phys_mem;
}

Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
    return 0;
}

Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
    panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
    return 0;
}

bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
    // In FS mode, ruby memory will receive pio responses from devices
    // and it must forward these responses back to the particular CPU.
    DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());

    assert(pkt->isResponse());

    // First we must retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
      safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->sendTiming(pkt);

    return true;
}

bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
    DPRINTF(RubyPort,
            "Timing access caught for address %#x\n", pkt->getAddr());

    //dsm: based on SimpleTimingPort::recvTiming(pkt);

    // The received packets should only be M5 requests, which should never
    // get nacked.  There used to be code to handle nacks here, but
    // I'm pretty sure it didn't work correctly with the drain code,
    // so that would need to be fixed if we ever added it back.
    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        warn("memInhibitAsserted???");
        // snooper will supply based on copy of packet
        // still target's responsibility to delete packet
        delete pkt;
        return true;
    }

    // Save the port in the sender state object to be used later to
    // route the response
    pkt->senderState = new SenderState(this, pkt->senderState);

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(RubyPort,
                "Request for address %#x is assumed to be a pio request\n",
                pkt->getAddr());

        return ruby_port->pio_port->sendTiming(pkt);
    }

    assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
           RubySystem::getBlockSizeBytes());

    // Submit the ruby request
    RequestStatus requestStatus = ruby_port->makeRequest(pkt);

    // If the request successfully issued then we should return true.
    // Otherwise, we need to delete the SenderState we just created and return
    // false.
    if (requestStatus == RequestStatus_Issued) {
        DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
        return true;
    }

    //
    // Unless one is using the ruby tester, record the stalled M5 port for
    // later retry when the sequencer becomes free.
    //
    if (!ruby_port->m_usingRubyTester) {
        ruby_port->addToRetryList(this);
    }

    DPRINTF(RubyPort,
            "Request for address %#x did not issue because %s\n",
            pkt->getAddr(), RequestStatus_to_string(requestStatus));

    SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
    pkt->senderState = senderState->saved;
    delete senderState;
    return false;
}
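
// Added commentary (not in the original source): responses are routed back to
// the issuing M5Port through a small stack of SenderState objects hanging off
// the packet.  The pattern used throughout this file is:
//
//     pkt->senderState = new SenderState(this, pkt->senderState);   // push
//     ...
//     SenderState *ss = safe_cast<SenderState *>(pkt->senderState);
//     pkt->senderState = ss->saved;                                 // pop
//     delete ss;
//
// which is why the failure path in recvTiming() above pops the state it
// pushed before returning false.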

bool
RubyPort::M5Port::doFunctionalRead(PacketPtr pkt)
{
    Address address(pkt->getAddr());
    Address line_address(address);
    line_address.makeLineAddress();

    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = ruby_system->m_abs_cntrl_vec.size();

    DPRINTF(RubyPort, "Functional Read request for %s\n", address);

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;

    // In this loop we count the number of controllers that have the given
    // address in read only, read write and busy states.
    for (int i = 0; i < num_controllers; ++i) {
        access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                            getAccessPermission(line_address);
        if (access_perm == AccessPermission_Read_Only)
            num_ro++;
        else if (access_perm == AccessPermission_Read_Write)
            num_rw++;
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Backing_Store)
            // See RubySlicc_Exports.sm for details, but Backing_Store is meant
            // to represent blocks in memory *for Broadcast/Snooping protocols*,
            // where memory has no idea whether it has an exclusive copy of data
            // or not.
            num_backing_store++;
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }
    assert(num_rw <= 1);

    uint8_t *data = pkt->getPtr<uint8_t>(true);
    unsigned int size_in_bytes = pkt->getSize();
    unsigned startByte = address.getAddress() - line_address.getAddress();

    // This if case is meant to capture what happens in a Broadcast/Snoop
    // protocol where the block does not exist in the cache hierarchy. You
    // only want to read from the Backing_Store memory if there is no copy in
    // the cache hierarchy, otherwise you want to try to read the RO or RW
    // copies existing in the cache hierarchy (covered by the else statement).
    // The reason is that the Backing_Store memory could easily be stale if
    // there are copies floating around the cache hierarchy, so you want to
    // read it only if it's not in the cache hierarchy at all.
    if (num_invalid == (num_controllers - 1) &&
            num_backing_store == 1)
    {
        DPRINTF(RubyPort, "only copy in Backing_Store memory, read from it\n");
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]
                                          ->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Backing_Store) {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                 ->getDataBlock(line_address);

                DPRINTF(RubyPort, "reading from %s block %s\n",
                        ruby_system->m_abs_cntrl_vec[i]->name(), block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    data[j] = block.getByte(j + startByte);
                }
                return true;
            }
        }
    } else {
        // In Broadcast/Snoop protocols, this covers if you know the block
        // exists somewhere in the caching hierarchy, then you want to read any
        // valid RO or RW block.  In directory protocols, same thing, you want
        // to read any valid readable copy of the block.
        DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
                num_busy, num_ro, num_rw);
        // In this loop, we try to figure out which controller has a read only
        // or a read write copy of the given address. Any valid copy would
        // suffice for a functional read.
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]
                                          ->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Read_Only ||
                access_perm == AccessPermission_Read_Write)
            {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                 ->getDataBlock(line_address);

                DPRINTF(RubyPort, "reading from %s block %s\n",
                        ruby_system->m_abs_cntrl_vec[i]->name(), block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    data[j] = block.getByte(j + startByte);
                }
                return true;
            }
        }
    }
    return false;
}
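
// Added summary (commentary only, restating the logic of doFunctionalRead()):
//   - if exactly one controller holds the line in Backing_Store and every
//     other controller is Invalid/NotPresent, the only copy lives in memory,
//     so read the backing store;
//   - otherwise read any Read_Only or Read_Write copy found in the caches,
//     since a cached copy may be newer than the backing store;
//   - if neither case applies (e.g. the only copy is Busy), return false and
//     let recvFunctional() decide whether that is fatal.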

bool
RubyPort::M5Port::doFunctionalWrite(PacketPtr pkt)
{
    Address addr(pkt->getAddr());
    Address line_addr = line_address(addr);
    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = ruby_system->m_abs_cntrl_vec.size();

    DPRINTF(RubyPort, "Functional Write request for %s\n", addr);

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;

    // In this loop we count the number of controllers that have the given
    // address in read only, read write and busy states.
    for (int i = 0; i < num_controllers; ++i) {
        access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                            getAccessPermission(line_addr);
        if (access_perm == AccessPermission_Read_Only)
            num_ro++;
        else if (access_perm == AccessPermission_Read_Write)
            num_rw++;
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Backing_Store)
            // See RubySlicc_Exports.sm for details, but Backing_Store is meant
            // to represent blocks in memory *for Broadcast/Snooping protocols*,
            // where memory has no idea whether it has an exclusive copy of data
            // or not.
            num_backing_store++;
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }

    // If the number of read write copies is more than 1, then there is a bug
    // in the coherence protocol. Otherwise, if all copies are in stable
    // states, i.e. num_busy == 0, we update all the copies. If there is at
    // least one copy in a busy state, we still let the access go through as
    // long as a read write copy exists. Finally, if there is no copy in the
    // cache hierarchy at all, we still want to do the write to the memory
    // (Backing_Store) instead of failing.

    DPRINTF(RubyPort, "num_busy = %d, num_ro = %d, num_rw = %d\n",
            num_busy, num_ro, num_rw);
    assert(num_rw <= 1);

    uint8_t *data = pkt->getPtr<uint8_t>(true);
    unsigned int size_in_bytes = pkt->getSize();
    unsigned startByte = addr.getAddress() - line_addr.getAddress();

    if ((num_busy == 0 && num_ro > 0) || num_rw == 1 ||
            (num_invalid == (num_controllers - 1) && num_backing_store == 1))
    {
        for (int i = 0; i < num_controllers; ++i) {
            access_perm = ruby_system->m_abs_cntrl_vec[i]->
                                                getAccessPermission(line_addr);
            if (access_perm == AccessPermission_Read_Only ||
                access_perm == AccessPermission_Read_Write ||
                access_perm == AccessPermission_Maybe_Stale ||
                access_perm == AccessPermission_Backing_Store)
            {
                DataBlock& block = ruby_system->m_abs_cntrl_vec[i]
                                                      ->getDataBlock(line_addr);

                DPRINTF(RubyPort, "%s\n", block);
                for (unsigned j = 0; j < size_in_bytes; ++j) {
                    block.setByte(j + startByte, data[j]);
                }
                DPRINTF(RubyPort, "%s\n", block);
            }
        }
        return true;
    }
    return false;
}
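
// Added summary (commentary only, restating the condition in
// doFunctionalWrite()): the write is applied to every copy held in
// Read_Only, Read_Write, Maybe_Stale, or Backing_Store state when
//     (num_busy == 0 && num_ro > 0)    // all copies stable, at least one RO
//  || (num_rw == 1)                    // a single writable copy exists
//  || (num_invalid == num_controllers - 1 && num_backing_store == 1)
//                                      // the block lives only in memory
// and false is returned otherwise, e.g. when the only copies are in
// transient (Busy) states.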

void
RubyPort::M5Port::recvFunctional(PacketPtr pkt)
{
    DPRINTF(RubyPort, "Functional access caught for address %#x\n",
            pkt->getAddr());

    // Check for pio requests and directly send them to the dedicated
    // pio port.
    if (!isPhysMemAddress(pkt->getAddr())) {
        assert(ruby_port->pio_port != NULL);
        DPRINTF(RubyPort, "Request for address %#x is a pio request\n",
                pkt->getAddr());
        panic("RubyPort::PioPort::recvFunctional() not implemented!\n");
    }

    assert(pkt->getAddr() + pkt->getSize() <=
                line_address(Address(pkt->getAddr())).getAddress() +
                RubySystem::getBlockSizeBytes());

    bool accessSucceeded = false;
    bool needsResponse = pkt->needsResponse();

    // Do the functional access on ruby memory
    if (pkt->isRead()) {
        accessSucceeded = doFunctionalRead(pkt);
    } else if (pkt->isWrite()) {
        accessSucceeded = doFunctionalWrite(pkt);
    } else {
        panic("RubyPort: unsupported functional command %s\n",
              pkt->cmdString());
    }

    // Unless the requester explicitly said otherwise, generate an error if
    // the functional request failed
    if (!accessSucceeded && !pkt->suppressFuncError()) {
        fatal("Ruby functional %s failed for address %#x\n",
              pkt->isWrite() ? "write" : "read", pkt->getAddr());
    }

    if (access_phys_mem) {
        // The attached physmem contains the official version of data.
        // The following command performs the real functional access.
        // This line should be removed once Ruby supplies the official version
        // of data.
        ruby_port->physMemPort->sendFunctional(pkt);
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        pkt->setFunctionalResponseStatus(accessSucceeded);

        // @todo There should not be a reverse call since the response is
        // communicated through the packet pointer
        // DPRINTF(RubyPort, "Sending packet back over port\n");
        // sendFunctional(pkt);
    }
    DPRINTF(RubyPort, "Functional access %s!\n",
            accessSucceeded ? "successful":"failed");
}

void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
    // Retrieve the request port from the sender state
    RubyPort::SenderState *senderState =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    M5Port *port = senderState->port;
    assert(port != NULL);

    // pop the sender state from the packet
    pkt->senderState = senderState->saved;
    delete senderState;

    port->hitCallback(pkt);

    //
    // If we had to stall the M5Ports, wake them up because the sequencer
    // likely has free resources now.
    //
    if (waitingOnSequencer) {
        //
        // Record the current list of ports to retry on a temporary list before
        // calling sendRetry on those ports.  sendRetry will cause an
        // immediate retry, which may result in the ports being put back on the
        // list. Therefore we want to clear the retryList before calling
        // sendRetry.
        //
        std::list<M5Port*> curRetryList(retryList);

        retryList.clear();
        waitingOnSequencer = false;

        for (std::list<M5Port*>::iterator i = curRetryList.begin();
             i != curRetryList.end(); ++i) {
            DPRINTF(RubyPort,
                    "Sequencer may now be free.  SendRetry to port %s\n",
                    (*i)->name());
            (*i)->onRetryList(false);
            (*i)->sendRetry();
        }
    }

    testDrainComplete();
}

void
RubyPort::testDrainComplete()
{
    // If we weren't able to drain before, we might be able to now.
    if (drainEvent != NULL) {
        unsigned int drainCount = getDrainCount(drainEvent);
        DPRINTF(Config, "Drain count: %u\n", drainCount);
        if (drainCount == 0) {
            drainEvent->process();
            // Clear the drain event once we're done with it.
            drainEvent = NULL;
        }
    }
}

unsigned int
RubyPort::getDrainCount(Event *de)
{
    int count = 0;
    //
    // If the sequencer is not empty, then requests need to drain.
    // The outstandingCount is the number of requests outstanding and thus the
    // number of times M5's timing port will process the drain event.
    //
    count += outstandingCount();

    DPRINTF(Config, "outstanding count %d\n", outstandingCount());

    // To simplify the draining process, the sequencer's deadlock detection
    // event should have been descheduled.
    assert(isDeadlockEventScheduled() == false);

    if (pio_port != NULL) {
        count += pio_port->drain(de);
        DPRINTF(Config, "count after pio check %d\n", count);
    }
    if (physMemPort != NULL) {
        count += physMemPort->drain(de);
        DPRINTF(Config, "count after physmem check %d\n", count);
    }

    for (CpuPortIter p_iter = cpu_ports.begin(); p_iter != cpu_ports.end();
         p_iter++) {
        M5Port* cpu_port = *p_iter;
        count += cpu_port->drain(de);
        DPRINTF(Config, "count after cpu port check %d\n", count);
    }

    DPRINTF(Config, "final count %d\n", count);

    return count;
}

unsigned int
RubyPort::drain(Event *de)
{
    if (isDeadlockEventScheduled()) {
        descheduleDeadlockEvent();
    }

    int count = getDrainCount(de);

    // Set status
    if (count != 0) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return count;
    }

    changeState(SimObject::Drained);
    return 0;
}
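
// Added summary (commentary only, describing the drain flow implemented
// above): drain() deschedules the sequencer's deadlock-check event, counts
// outstanding work with getDrainCount(), and either transitions directly to
// Drained (count == 0) or saves the drain event and enters Draining.  As
// requests complete, ruby_hit_callback() calls testDrainComplete(), which
// fires and clears the saved event once the drain count reaches zero.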

void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
    bool needsResponse = pkt->needsResponse();

    //
    // Unless specified at configuration, all responses except failed SC
    // and Flush operations access M5 physical memory.
    //
    bool accessPhysMem = access_phys_mem;

    if (pkt->isLLSC()) {
        if (pkt->isWrite()) {
            if (pkt->req->getExtraData() != 0) {
                //
                // Successful SC packets convert to normal writes
                //
                pkt->convertScToWrite();
            } else {
                //
                // Failed SC packets don't access physical memory and thus
                // the RubyPort itself must convert it to a response.
                //
                accessPhysMem = false;
            }
        } else {
            //
            // All LL packets convert to normal loads so that M5 PhysMem does
            // not lock the blocks.
            //
            pkt->convertLlToRead();
        }
    }

    //
    // Flush requests don't access physical memory
    //
    if (pkt->isFlush()) {
        accessPhysMem = false;
    }

    DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);

    if (accessPhysMem) {
        ruby_port->physMemPort->sendAtomic(pkt);
    } else if (needsResponse) {
        pkt->makeResponse();
    }

    // turn packet around to go back to requester if response expected
    if (needsResponse) {
        DPRINTF(RubyPort, "Sending packet back over port\n");
        sendTiming(pkt);
    } else {
        delete pkt;
    }
    DPRINTF(RubyPort, "Hit callback done!\n");
}
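
// Added note (commentary only, summarizing the LL/SC handling above): by the
// time this callback runs the SC outcome is already recorded in the request's
// extra data, so successful SCs become plain writes, failed SCs skip physical
// memory and are turned into responses here, and LLs become plain reads so
// that M5's physmem does not lock the block.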

bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
    // minimum latency, must be > 0
    schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
    return true;
}

bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList physMemAddrList =
        ruby_port->physMemPort->getPeer()->getAddrRanges();
    for (AddrRangeIter iter = physMemAddrList.begin();
         iter != physMemAddrList.end();
         iter++) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(RubyPort, "Request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }
    return false;
}

unsigned
RubyPort::M5Port::deviceBlockSize() const
{
    return (unsigned) RubySystem::getBlockSizeBytes();
}